primellm 0.1.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.ts ADDED
@@ -0,0 +1,411 @@
1
+ #!/usr/bin/env node
2
+
3
+ import chalk from 'chalk';
4
+ import figlet from 'figlet';
5
+ import ora from 'ora';
6
+ import { execSync, spawnSync } from 'child_process';
7
+ import { platform, homedir } from 'os';
8
+ import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
9
+ import { join } from 'path';
10
+ import open from 'open';
11
+ import Enquirer from 'enquirer';
12
+
13
+ const { prompt } = Enquirer;
14
+
15
+ // ============================================================================
16
+ // CONSTANTS
17
+ // ============================================================================
18
+
19
// PrimeLLM OpenAI-compatible API endpoint; written as `baseUrl` into tool configs.
const PRIMELLM_BASE_URL = 'https://api.primellm.in/v1';
// Dashboard page for creating/managing API keys; opened when the user has no key yet.
const PRIMELLM_DASHBOARD_URL = 'https://primellm.in/dashboard/api-keys';
21
+
22
// Tool installation packages.
//   command:    binary name probed on PATH to detect an existing install.
//   package:    npm package installed globally when the tool is missing.
//   configName: short config identifier — NOTE(review): not referenced by the
//               visible code (getConfigPaths hard-codes directory names); confirm intent.
const TOOLS = {
  'Claude Code': {
    command: 'claude',
    package: '@anthropic-ai/claude-code',
    configName: 'claude',
  },
  'Codex': {
    command: 'codex',
    package: '@openai/codex',
    configName: 'codex',
  },
} as const;

// Union of supported tool display names: 'Claude Code' | 'Codex'.
type ToolName = keyof typeof TOOLS;
37
+
38
+ // ============================================================================
39
+ // SYSTEM DETECTION
40
+ // ============================================================================
41
+
42
+ function getOS(): string {
43
+ const os = platform();
44
+ switch (os) {
45
+ case 'darwin': return 'macOS';
46
+ case 'linux': return 'Linux';
47
+ case 'win32': return 'Windows';
48
+ default: return os;
49
+ }
50
+ }
51
+
52
+ function getShell(): string {
53
+ const shell = process.env.SHELL || process.env.COMSPEC || 'unknown';
54
+ const shellName = shell.split(/[/\\]/).pop() || 'unknown';
55
+ return shellName;
56
+ }
57
+
58
+ function getNodeVersion(): string {
59
+ return process.version;
60
+ }
61
+
62
+ function commandExists(cmd: string): boolean {
63
+ try {
64
+ const isWindows = platform() === 'win32';
65
+ const checkCmd = isWindows ? `where ${cmd}` : `which ${cmd}`;
66
+ execSync(checkCmd, { stdio: 'ignore' });
67
+ return true;
68
+ } catch {
69
+ return false;
70
+ }
71
+ }
72
+
73
+ // ============================================================================
74
+ // ASCII ART & DISPLAY
75
+ // ============================================================================
76
+
77
+ function displayBanner(): void {
78
+ console.clear();
79
+
80
+ const banner = figlet.textSync('PrimeLLM', {
81
+ font: 'Big',
82
+ horizontalLayout: 'default',
83
+ });
84
+
85
+ console.log(chalk.cyan(banner));
86
+ console.log(chalk.gray('━'.repeat(60)));
87
+ console.log(chalk.white.bold(' Configure AI coding tools with PrimeLLM'));
88
+ console.log(chalk.gray('━'.repeat(60)));
89
+ console.log();
90
+ }
91
+
92
+ function displaySystemInfo(): void {
93
+ console.log(chalk.white.bold('📊 System Information\n'));
94
+
95
+ const os = getOS();
96
+ const shell = getShell();
97
+ const nodeVersion = getNodeVersion();
98
+
99
+ console.log(` ${chalk.gray('OS:')} ${chalk.green(os)}`);
100
+ console.log(` ${chalk.gray('Shell:')} ${chalk.green(shell)}`);
101
+ console.log(` ${chalk.gray('Node:')} ${chalk.green(nodeVersion)}`);
102
+ console.log();
103
+ }
104
+
105
+ // ============================================================================
106
+ // CONFIGURATION PATHS
107
+ // ============================================================================
108
+
109
+ function getConfigPaths(tool: ToolName, scope: 'system' | 'project'): { dir: string; file: string } {
110
+ const home = homedir();
111
+
112
+ if (tool === 'Claude Code') {
113
+ if (scope === 'system') {
114
+ return {
115
+ dir: join(home, '.claude'),
116
+ file: join(home, '.claude', 'config.json'),
117
+ };
118
+ } else {
119
+ return {
120
+ dir: '.claude',
121
+ file: join('.claude', 'config.json'),
122
+ };
123
+ }
124
+ } else {
125
+ // Codex uses OpenAI-compatible config
126
+ if (scope === 'system') {
127
+ return {
128
+ dir: join(home, '.codex'),
129
+ file: join(home, '.codex', 'config.json'),
130
+ };
131
+ } else {
132
+ return {
133
+ dir: '.codex',
134
+ file: join('.codex', 'config.json'),
135
+ };
136
+ }
137
+ }
138
+ }
139
+
140
// Shape of an on-disk tool config file. Known fields are typed; any other
// keys are passed through untouched when the file is rewritten (see writeConfig).
interface Config {
  baseUrl?: string; // API endpoint the tool should talk to
  apiKey?: string; // PrimeLLM API key
  [key: string]: unknown; // tool-specific settings preserved on merge
}
145
+
146
+ function readExistingConfig(configPath: string): Config | null {
147
+ try {
148
+ if (existsSync(configPath)) {
149
+ const content = readFileSync(configPath, 'utf8');
150
+ return JSON.parse(content);
151
+ }
152
+ } catch {
153
+ // Config doesn't exist or is invalid
154
+ }
155
+ return null;
156
+ }
157
+
158
+ function writeConfig(tool: ToolName, scope: 'system' | 'project', apiKey: string): void {
159
+ const { dir, file } = getConfigPaths(tool, scope);
160
+
161
+ // Create directory if it doesn't exist
162
+ if (!existsSync(dir)) {
163
+ mkdirSync(dir, { recursive: true });
164
+ }
165
+
166
+ let config: Config;
167
+
168
+ if (tool === 'Claude Code') {
169
+ // Claude Code configuration format
170
+ config = {
171
+ apiKey: apiKey,
172
+ baseUrl: PRIMELLM_BASE_URL,
173
+ model: 'claude-sonnet-4-20250514', // Default model
174
+ };
175
+ } else {
176
+ // Codex (OpenAI-compatible) configuration format
177
+ config = {
178
+ apiKey: apiKey,
179
+ baseUrl: PRIMELLM_BASE_URL,
180
+ model: 'gpt-4.1', // Default model
181
+ };
182
+ }
183
+
184
+ // Preserve existing config values
185
+ const existing = readExistingConfig(file);
186
+ if (existing) {
187
+ config = { ...existing, ...config };
188
+ }
189
+
190
+ writeFileSync(file, JSON.stringify(config, null, 2));
191
+ }
192
+
193
+ // ============================================================================
194
+ // TOOL INSTALLATION
195
+ // ============================================================================
196
+
197
+ async function installTool(tool: ToolName): Promise<boolean> {
198
+ const toolInfo = TOOLS[tool];
199
+ const spinner = ora(`Installing ${tool}...`).start();
200
+
201
+ try {
202
+ const isWindows = platform() === 'win32';
203
+ const npmCmd = isWindows ? 'npm.cmd' : 'npm';
204
+
205
+ const result = spawnSync(npmCmd, ['install', '-g', toolInfo.package], {
206
+ stdio: 'pipe',
207
+ shell: true,
208
+ });
209
+
210
+ if (result.status !== 0) {
211
+ spinner.fail(chalk.red(`Failed to install ${tool}`));
212
+ console.error(chalk.gray(result.stderr?.toString() || 'Unknown error'));
213
+ return false;
214
+ }
215
+
216
+ spinner.succeed(chalk.green(`${tool} installed successfully`));
217
+ return true;
218
+ } catch (error) {
219
+ spinner.fail(chalk.red(`Failed to install ${tool}`));
220
+ console.error(chalk.gray(String(error)));
221
+ return false;
222
+ }
223
+ }
224
+
225
+ // ============================================================================
226
+ // VALIDATION
227
+ // ============================================================================
228
+
229
+ function validateApiKey(key: string): boolean {
230
+ return key.startsWith('primellm_') && key.length > 15;
231
+ }
232
+
233
+ function maskApiKey(key: string): string {
234
+ if (key.length <= 12) return '***';
235
+ return key.substring(0, 12) + '...' + key.substring(key.length - 4);
236
+ }
237
+
238
+ // ============================================================================
239
+ // MAIN FLOW
240
+ // ============================================================================
241
+
242
/**
 * Interactive setup wizard.
 *
 * Flow: banner → system info → pick a tool → install it if missing →
 * collect/validate an API key → pick config scope → confirm overwrite of any
 * existing config → write config → print a success summary.
 * Exits the process directly (0 on user cancel, 1 on failure).
 */
async function main(): Promise<void> {
  displayBanner();
  displaySystemInfo();

  // Cosmetic pause so the spinner is visible before the first prompt.
  const spinner = ora('Initializing...').start();
  await new Promise(resolve => setTimeout(resolve, 500));
  spinner.succeed('Ready');
  console.log();

  // ============ STEP 1: Tool Selection ============
  console.log(chalk.white.bold('🔧 Tool Selection\n'));

  const { selectedTool } = await prompt<{ selectedTool: ToolName }>({
    type: 'select',
    name: 'selectedTool',
    message: 'Which tool would you like to configure?',
    choices: ['Claude Code', 'Codex'],
  });

  console.log();

  // ============ STEP 2: Tool Detection & Installation ============
  const toolInfo = TOOLS[selectedTool];
  const toolExists = commandExists(toolInfo.command);

  if (toolExists) {
    console.log(chalk.green(`✓ ${selectedTool} is already installed`));
  } else {
    console.log(chalk.yellow(`⚠ ${selectedTool} is not installed`));

    const { confirmInstall } = await prompt<{ confirmInstall: boolean }>({
      type: 'confirm',
      name: 'confirmInstall',
      message: `Would you like to install ${selectedTool}?`,
      initial: true,
    });

    if (!confirmInstall) {
      // User declined the install — nothing to configure; exit cleanly.
      console.log(chalk.yellow('\n⚠ Installation cancelled. Please install manually and run again.'));
      process.exit(0);
    }

    const installed = await installTool(selectedTool);
    if (!installed) {
      console.log(chalk.red('\n❌ Installation failed. Please install manually and run again.'));
      process.exit(1);
    }
  }

  console.log();

  // ============ STEP 3: API Key ============
  console.log(chalk.white.bold('🔑 API Key Configuration\n'));

  const { hasApiKey } = await prompt<{ hasApiKey: string }>({
    type: 'select',
    name: 'hasApiKey',
    message: 'Do you already have a PrimeLLM API key?',
    choices: [
      { name: 'yes', message: 'Yes, I have an API key' },
      { name: 'no', message: 'No, I need to create one' },
    ],
  });

  let apiKey: string;

  if (hasApiKey === 'no') {
    // Send the user to the dashboard, then block until they confirm.
    console.log(chalk.cyan('\n📝 Opening PrimeLLM dashboard to create an API key...\n'));

    await open(PRIMELLM_DASHBOARD_URL);

    await prompt({
      type: 'confirm',
      name: 'ready',
      message: 'Press Enter when you have created your API key...',
    });
  }

  // Prompt for API key
  const { inputApiKey } = await prompt<{ inputApiKey: string }>({
    type: 'password',
    name: 'inputApiKey',
    message: 'Enter your PrimeLLM API key:',
    validate: (value: string) => {
      if (!value) return 'API key is required';
      // NOTE(review): validateApiKey also requires length > 15, but this
      // message only mentions the prefix — consider clarifying the wording.
      if (!validateApiKey(value)) return 'API key must start with "primellm_"';
      return true;
    },
  });

  apiKey = inputApiKey;
  console.log(chalk.green(`✓ API key validated: ${maskApiKey(apiKey)}`));
  console.log();

  // ============ STEP 4: Configuration Scope ============
  console.log(chalk.white.bold('📁 Configuration Scope\n'));

  const { configScope } = await prompt<{ configScope: 'system' | 'project' }>({
    type: 'select',
    name: 'configScope',
    message: 'Choose configuration scope:',
    choices: [
      { name: 'system', message: 'System-level (applies to all projects)' },
      { name: 'project', message: 'Project-level (current project only)' },
    ],
  });

  console.log();

  // ============ STEP 5: Check Existing Config ============
  // Show what would be overwritten before touching the file.
  const { file: configFile } = getConfigPaths(selectedTool, configScope);
  const existingConfig = readExistingConfig(configFile);

  if (existingConfig) {
    console.log(chalk.yellow('⚠ Existing configuration found:\n'));
    console.log(chalk.gray(` Base URL: ${existingConfig.baseUrl || '(not set)'}`));
    console.log(chalk.gray(` API Key: ${existingConfig.apiKey ? maskApiKey(existingConfig.apiKey) : '(not set)'}`));
    console.log();

    const { confirmOverwrite } = await prompt<{ confirmOverwrite: boolean }>({
      type: 'confirm',
      name: 'confirmOverwrite',
      message: 'Do you want to overwrite this configuration?',
      initial: true,
    });

    if (!confirmOverwrite) {
      console.log(chalk.yellow('\n⚠ Configuration cancelled.'));
      process.exit(0);
    }
    console.log();
  }

  // ============ STEP 6: Write Configuration ============
  const writeSpinner = ora('Writing configuration...').start();

  try {
    writeConfig(selectedTool, configScope, apiKey);
    writeSpinner.succeed(chalk.green('Configuration saved'));
  } catch (error) {
    writeSpinner.fail(chalk.red('Failed to write configuration'));
    console.error(chalk.gray(String(error)));
    process.exit(1);
  }

  // ============ SUCCESS ============
  console.log();
  console.log(chalk.gray('━'.repeat(60)));
  console.log();
  console.log(chalk.green.bold('✅ PrimeLLM configured successfully!'));
  console.log();
  console.log(chalk.white('You can now use:'));
  console.log(chalk.cyan(` ${toolInfo.command}`));
  console.log(chalk.white('with PrimeLLM as the backend.'));
  console.log();
  console.log(chalk.gray(`Config location: ${configFile}`));
  console.log(chalk.gray(`Base URL: ${PRIMELLM_BASE_URL}`));
  console.log();
  console.log(chalk.gray('━'.repeat(60)));
  console.log();
  console.log(chalk.white('Need help? Visit ') + chalk.cyan('https://primellm.in/docs'));
  console.log();
}
405
+
406
+ // Run the CLI
407
+ main().catch((error) => {
408
+ console.error(chalk.red('An unexpected error occurred:'));
409
+ console.error(chalk.gray(String(error)));
410
+ process.exit(1);
411
+ });
package/tsconfig.json ADDED
@@ -0,0 +1,27 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2022",
4
+ "module": "NodeNext",
5
+ "moduleResolution": "NodeNext",
6
+ "lib": [
7
+ "ES2022"
8
+ ],
9
+ "outDir": "./dist",
10
+ "rootDir": "./src",
11
+ "strict": true,
12
+ "esModuleInterop": true,
13
+ "skipLibCheck": true,
14
+ "forceConsistentCasingInFileNames": true,
15
+ "resolveJsonModule": true,
16
+ "declaration": true,
17
+ "declarationMap": true,
18
+ "sourceMap": true
19
+ },
20
+ "include": [
21
+ "src/**/*"
22
+ ],
23
+ "exclude": [
24
+ "node_modules",
25
+ "dist"
26
+ ]
27
+ }
package/dist/types.d.ts DELETED
@@ -1,141 +0,0 @@
1
- /**
2
- * PrimeLLM SDK Types
3
- *
4
- * This file contains all the TypeScript types used by the PrimeLLM SDK.
5
- * These types match the response format from the PrimeLLM API.
6
- *
7
- * Think of types like "templates" that describe what data looks like.
8
- * They help catch errors before your code runs!
9
- */
10
- /**
11
- * The role of a message in a conversation.
12
- * - "system": Instructions for the AI (like "be helpful")
13
- * - "user": Messages from the human user
14
- * - "assistant": Messages from the AI
15
- */
16
- export type ChatRole = "system" | "user" | "assistant";
17
- /**
18
- * A single message in a conversation.
19
- * Each message has a role (who said it) and content (what they said).
20
- */
21
- export interface ChatMessage {
22
- role: ChatRole;
23
- content: string;
24
- }
25
- /**
26
- * Request body for the /v1/chat and /v1/chat/completions endpoints.
27
- * This is what you send TO the API.
28
- */
29
- export interface ChatRequest {
30
- /** Model name, e.g. "gpt-5.1", "claude-sonnet-4.5", "gemini-3.0" */
31
- model: string;
32
- /** List of messages in the conversation */
33
- messages: ChatMessage[];
34
- /** Whether to stream the response (not supported yet) */
35
- stream?: boolean;
36
- /** Optional extra data to pass along */
37
- metadata?: Record<string, unknown>;
38
- /** Temperature for randomness (0.0 = focused, 1.0 = creative) */
39
- temperature?: number;
40
- /** Maximum tokens to generate */
41
- max_tokens?: number;
42
- }
43
- /**
44
- * Request body for the /generate endpoint (legacy).
45
- * This endpoint uses a simpler "prompt" format instead of messages.
46
- */
47
- export interface GenerateRequest {
48
- /** Model name, e.g. "gpt-5.1" */
49
- model: string;
50
- /** The messages to send (same as ChatRequest) */
51
- messages: ChatMessage[];
52
- /** Maximum tokens to generate */
53
- max_tokens?: number;
54
- /** Temperature for randomness */
55
- temperature?: number;
56
- /** Whether to stream (not supported yet) */
57
- stream?: boolean;
58
- /** Optional extra data */
59
- metadata?: Record<string, unknown>;
60
- }
61
- /**
62
- * A single "choice" in the API response.
63
- * The API can return multiple choices, but usually returns just one.
64
- */
65
- export interface ChatChoice {
66
- /** Index of this choice (usually 0) */
67
- index: number;
68
- /** The AI's response message */
69
- message: ChatMessage;
70
- /** Why the AI stopped: "stop" means it finished normally */
71
- finish_reason?: string | null;
72
- }
73
- /**
74
- * Token usage information.
75
- * Tokens are like "word pieces" - the AI counts usage in tokens.
76
- */
77
- export interface Usage {
78
- /** Tokens used by your input (prompt) */
79
- prompt_tokens: number;
80
- /** Tokens used by the AI's response */
81
- completion_tokens: number;
82
- /** Total tokens = prompt + completion */
83
- total_tokens: number;
84
- }
85
- /**
86
- * Credit information from your PrimeLLM account.
87
- * Credits are like "money" - each API call costs some credits.
88
- */
89
- export interface CreditsInfo {
90
- /** How many credits you have left */
91
- remaining: number;
92
- /** How much this request cost */
93
- cost?: number;
94
- }
95
- /**
96
- * The full response from /v1/chat or /v1/chat/completions.
97
- * This matches the OpenAI response format.
98
- */
99
- export interface ChatResponse {
100
- /** Unique ID for this response */
101
- id: string;
102
- /** Which model was used */
103
- model: string;
104
- /** When this was created (Unix timestamp in seconds) */
105
- created: number;
106
- /** Type of object (always "chat.completion") */
107
- object?: string;
108
- /** The AI's response(s) */
109
- choices: ChatChoice[];
110
- /** Token usage information */
111
- usage: Usage;
112
- /** Your credit balance (PrimeLLM-specific) */
113
- credits?: CreditsInfo;
114
- }
115
- /**
116
- * Response from the /generate endpoint (legacy format).
117
- */
118
- export interface GenerateResponse {
119
- /** The AI's reply text */
120
- reply: string;
121
- /** Which model was used */
122
- model: string;
123
- /** Total tokens used */
124
- tokens_used: number;
125
- /** Cost of this request */
126
- cost: number;
127
- /** Credits remaining in your account */
128
- credits_remaining: number;
129
- }
130
- /**
131
- * Options for creating a PrimeLLMClient.
132
- */
133
- export interface PrimeLLMClientOptions {
134
- /** Your PrimeLLM API key (starts with "primellm_live_") */
135
- apiKey: string;
136
- /** Base URL for the API (default: "https://api.primellm.in") */
137
- baseURL?: string;
138
- /** Request timeout in milliseconds (default: 60000 = 60 seconds) */
139
- timeoutMs?: number;
140
- }
141
- //# sourceMappingURL=types.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAMH;;;;;GAKG;AACH,MAAM,MAAM,QAAQ,GAAG,QAAQ,GAAG,MAAM,GAAG,WAAW,CAAC;AAEvD;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;CACnB;AAMD;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,oEAAoE;IACpE,KAAK,EAAE,MAAM,CAAC;IAEd,2CAA2C;IAC3C,QAAQ,EAAE,WAAW,EAAE,CAAC;IAExB,yDAAyD;IACzD,MAAM,CAAC,EAAE,OAAO,CAAC;IAEjB,wCAAwC;IACxC,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAEnC,iEAAiE;IACjE,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB,iCAAiC;IACjC,UAAU,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;;GAGG;AACH,MAAM,WAAW,eAAe;IAC5B,iCAAiC;IACjC,KAAK,EAAE,MAAM,CAAC;IAEd,iDAAiD;IACjD,QAAQ,EAAE,WAAW,EAAE,CAAC;IAExB,iCAAiC;IACjC,UAAU,CAAC,EAAE,MAAM,CAAC;IAEpB,iCAAiC;IACjC,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB,4CAA4C;IAC5C,MAAM,CAAC,EAAE,OAAO,CAAC;IAEjB,0BAA0B;IAC1B,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACtC;AAMD;;;GAGG;AACH,MAAM,WAAW,UAAU;IACvB,uCAAuC;IACvC,KAAK,EAAE,MAAM,CAAC;IAEd,gCAAgC;IAChC,OAAO,EAAE,WAAW,CAAC;IAErB,4DAA4D;IAC5D,aAAa,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;CACjC;AAED;;;GAGG;AACH,MAAM,WAAW,KAAK;IAClB,yCAAyC;IACzC,aAAa,EAAE,MAAM,CAAC;IAEtB,uCAAuC;IACvC,iBAAiB,EAAE,MAAM,CAAC;IAE1B,yCAAyC;IACzC,YAAY,EAAE,MAAM,CAAC;CACxB;AAED;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAC;IAElB,iCAAiC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC;CACjB;AAED;;;GAGG;AACH,MAAM,WAAW,YAAY;IACzB,kCAAkC;IAClC,EAAE,EAAE,MAAM,CAAC;IAEX,2BAA2B;IAC3B,KAAK,EAAE,MAAM,CAAC;IAEd,wDAAwD;IACxD,OAAO,EAAE,MAAM,CAAC;IAEhB,gDAAgD;IAChD,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB,2BAA2B;IAC3B,OAAO,EAAE,UAAU,EAAE,CAAC;IAEtB,8BAA8B;IAC9B,KAAK,EAAE,KAAK,CAAC;IAEb,8CAA8C;IAC9C,OAAO,CAAC,EAAE,WAAW,CAAC;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC7B,0BAA0B;IAC1B,KAAK,EAAE,MAAM,CAAC;IAEd,2BAA2B;IAC3B,KAAK,EAAE,MAAM,CAAC;IAEd,wBAAwB;IACxB,WAAW,EAAE,MAAM,CAAC;IAEpB,2BAA2B;IAC3B,IAAI,EAAE,MAAM,CAAC;IAEb,wCAAwC;IACxC,iBAAiB,EAAE,MAAM,CAAC;CAC7B;AAMD;;GAEG;AACH,MAAM,WAAW,qBAAqB;IAClC,2DAA2D;IAC3D,MAAM,EAAE,MAAM,CAAC;IAE
f,gEAAgE;IAChE,OAAO,CAAC,EAAE,MAAM,CAAC;IAEjB,oEAAoE;IACpE,SAAS,CAAC,EAAE,MAAM,CAAC;CACtB"}
package/dist/types.js DELETED
@@ -1,10 +0,0 @@
1
- /**
2
- * PrimeLLM SDK Types
3
- *
4
- * This file contains all the TypeScript types used by the PrimeLLM SDK.
5
- * These types match the response format from the PrimeLLM API.
6
- *
7
- * Think of types like "templates" that describe what data looks like.
8
- * They help catch errors before your code runs!
9
- */
10
- export {};