primellm 0.2.0 → 1.0.1
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +67 -97
- package/dist/index.d.ts +2 -147
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +324 -253
- package/dist/index.js.map +1 -0
- package/package.json +32 -31
- package/src/index.ts +406 -0
- package/tsconfig.json +27 -0
- package/cli/index.js +0 -34
- package/dist/errors.d.ts +0 -59
- package/dist/errors.d.ts.map +0 -1
- package/dist/errors.js +0 -93
- package/dist/streaming.d.ts +0 -29
- package/dist/streaming.d.ts.map +0 -1
- package/dist/streaming.js +0 -64
- package/dist/tokenizer.d.ts +0 -42
- package/dist/tokenizer.d.ts.map +0 -1
- package/dist/tokenizer.js +0 -61
- package/dist/types.d.ts +0 -172
- package/dist/types.d.ts.map +0 -1
- package/dist/types.js +0 -7
package/README.md
CHANGED
````diff
@@ -1,127 +1,97 @@
-# PrimeLLM
+# PrimeLLM CLI
 
-
+> Configure Claude Code and Codex to use PrimeLLM as the backend
 
-
+A production-grade CLI installer that configures AI coding tools to use PrimeLLM's unified API.
 
-##
+## Quick Start
 
 ```bash
-
+npx primellm
 ```
 
-##
+## Features
 
-
-
-
-
-
-
-
-
-// Send a chat message
-const response = await client.chat({
-  model: "gpt-5.1",
-  messages: [
-    { role: "system", content: "You are a helpful assistant." },
-    { role: "user", content: "What is TypeScript?" },
-  ],
-});
-
-// Access the response
-console.log(response.choices[0].message.content);
-console.log("Tokens used:", response.usage.total_tokens);
-console.log("Credits left:", response.credits.remaining);
-```
+- 🎨 **Beautiful UI** - ASCII art banner and progress indicators
+- 🔍 **System Detection** - Automatically detects OS, shell, and Node version
+- 🛠 **Tool Selection** - Choose between Claude Code or Codex
+- 📦 **Smart Installation** - Only installs tools if not already present
+- 🔑 **Secure API Key Flow** - Masked input with validation
+- 📁 **Flexible Scope** - System-level or project-level configuration
+
+## Supported Tools
 
-
+| Tool | Package | Command |
+|------|---------|---------|
+| Claude Code | `@anthropic-ai/claude-code` | `claude` |
+| Codex | `@openai/codex` | `codex` |
 
-
-|-------|-------------|
-| `gpt-5.1` | Latest GPT model (default) |
-| `claude-sonnet-4.5` | Claude Sonnet 4.5 |
-| `gemini-3.0` | Gemini 3.0 |
+## Usage
 
-
+### Interactive Mode
 
-
+Simply run the CLI and follow the prompts:
 
-```
-
-  apiKey: "primellm_XXX", // Required
-  baseURL: "https://api.primellm.in", // Optional, this is the default
-  timeoutMs: 60000, // Optional, 60 seconds default
-});
+```bash
+npx primellm
 ```
 
-###
+### What it Does
 
-
+1. **Detects your system** - Shows OS, shell, and Node version
+2. **Asks which tool** - Claude Code or Codex
+3. **Checks installation** - Skips install if already present
+4. **Gets your API key** - Opens browser if you need to create one
+5. **Configures the tool** - Writes config with PrimeLLM backend
 
-
-const response = await client.chat({
-  model: "gpt-5.1",
-  messages: [
-    { role: "user", content: "Hello!" }
-  ],
-  temperature: 0.7, // Optional
-  max_tokens: 1000, // Optional
-});
-```
+## Configuration
 
-
-```javascript
-{
-  id: "chatcmpl_xxx",
-  model: "gpt-5.1",
-  choices: [{
-    index: 0,
-    message: { role: "assistant", content: "..." },
-    finish_reason: "stop"
-  }],
-  usage: {
-    prompt_tokens: 10,
-    completion_tokens: 20,
-    total_tokens: 30
-  },
-  credits: {
-    remaining: 149.99,
-    cost: 0.00006
-  }
-}
-```
+### System-level (recommended)
 
-
+Applies to all projects. Config stored in:
+- Claude Code: `~/.claude/config.json`
+- Codex: `~/.codex/config.json`
 
-
+### Project-level
 
-
-
+Applies to current project only. Config stored in:
+- Claude Code: `./.claude/config.json`
+- Codex: `./.codex/config.json`
 
-
+## API Key
 
-
-
-
-};
+Your PrimeLLM API key:
+- Must start with `primellm_`
+- Can be created at: https://primellm.in/dashboard/api-keys
 
-
-```
+## Requirements
 
-
-
-
-
-
-
-
-
-
-
-
+- Node.js >= 18.0.0
+- npm or npx
+
+## Development
+
+```bash
+# Clone the repo
+git clone https://github.com/rishuuu-codesss/primellm-backend.git
+cd primellm-backend/primellm-cli
+
+# Install dependencies
+npm install
+
+# Run in development
+npm run dev
+
+# Build
+npm run build
 ```
 
 ## License
 
 MIT
+
+## Links
+
+- [PrimeLLM Website](https://primellm.in)
+- [API Documentation](https://primellm.in/docs)
+- [Dashboard](https://primellm.in/dashboard)
````
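The README above documents what the installer does but not how; the implementation ships as `package/src/index.ts` (+406 lines, contents not shown in this diff). As a rough sketch of steps 4-5 ("Gets your API key", "Configures the tool"), the TypeScript below is hypothetical: only the config file locations and the `primellm_` key prefix come from the README, while the function names and the written config schema are invented for illustration.

```ts
// Hypothetical sketch - not the actual src/index.ts implementation.
import { mkdirSync, writeFileSync } from "node:fs";
import { homedir } from "node:os";
import { dirname, join } from "node:path";

type Tool = "claude" | "codex";
type Scope = "system" | "project";

// README-documented locations: ~/.claude/config.json, ./.codex/config.json, etc.
function configPath(tool: Tool, scope: Scope): string {
  const root = scope === "system" ? homedir() : process.cwd();
  return join(root, `.${tool}`, "config.json");
}

function writeToolConfig(tool: Tool, scope: Scope, apiKey: string): void {
  // README-documented rule: keys must start with "primellm_".
  if (!apiKey.startsWith("primellm_")) {
    throw new Error('PrimeLLM API keys must start with "primellm_"');
  }
  const path = configPath(tool, scope);
  mkdirSync(dirname(path), { recursive: true });
  // The real config keys the installer writes are not visible in this diff;
  // { apiKey } is a placeholder schema.
  writeFileSync(path, JSON.stringify({ apiKey }, null, 2) + "\n");
}

writeToolConfig("claude", "system", "primellm_XXX"); // writes ~/.claude/config.json
```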
package/dist/index.d.ts
CHANGED
```diff
@@ -1,148 +1,3 @@
-
-
- *
- * Production-grade SDK with streaming, retries, and full API parity.
- *
- * @example
- * import PrimeLLM from "primellm";
- *
- * const client = new PrimeLLM({ apiKey: "primellm_XXX" });
- * const response = await client.chat({
- *   model: "gpt-5.1",
- *   messages: [{ role: "user", content: "Hello!" }],
- * });
- * console.log(response.choices[0].message.content);
- */
-import { ChatRequest, ChatResponse, Message, PrimeLLMClientOptions, EmbeddingsRequest, EmbeddingsResponse, ModelsResponse, CreditsResponse, KeysResponse, KeyCreateResponse } from "./types.js";
-import { StreamChunk } from "./streaming.js";
-export * from "./types.js";
-export * from "./errors.js";
-export { countTokens, setTokenizerAdapter } from "./tokenizer.js";
-export { StreamChunk } from "./streaming.js";
-/**
- * PrimeLLM API Client
- *
- * Production-grade client with streaming, retries, and full API access.
- */
-export declare class PrimeLLM {
-    private apiKey;
-    private baseURL;
-    private timeoutMs;
-    private retry;
-    embeddings: EmbeddingsClient;
-    models: ModelsClient;
-    keys: KeysClient;
-    credits: CreditsClient;
-    tokens: TokensClient;
-    chat: ChatClient;
-    /**
-     * Create a new PrimeLLM client.
-     *
-     * @param options - Configuration options
-     * @param options.apiKey - Your PrimeLLM API key (required)
-     * @param options.baseURL - API base URL (default: "https://api.primellm.in")
-     * @param options.timeoutMs - Request timeout in ms (default: 60000)
-     * @param options.maxRetries - Max retry attempts (default: 3)
-     */
-    constructor(options: PrimeLLMClientOptions);
-    /**
-     * Internal HTTP request with retries and error handling
-     */
-    request<TResponse>(path: string, body?: unknown, options?: {
-        method?: string;
-    }): Promise<TResponse>;
-    /**
-     * Internal streaming request
-     */
-    streamRequest(path: string, body: unknown): AsyncGenerator<StreamChunk, void, unknown>;
-}
-/**
- * Chat sub-client
- */
-declare class ChatClient {
-    private client;
-    constructor(client: PrimeLLM);
-    /**
-     * Send a chat completion request
-     */
-    create(request: ChatRequest): Promise<ChatResponse>;
-    /**
-     * Stream chat completion (async iterator)
-     *
-     * @example
-     * for await (const chunk of client.chat.stream({...})) {
-     *   console.log(chunk.delta?.content);
-     * }
-     */
-    stream(request: ChatRequest): AsyncGenerator<StreamChunk, void, unknown>;
-}
-/**
- * Embeddings sub-client
- */
-declare class EmbeddingsClient {
-    private client;
-    constructor(client: PrimeLLM);
-    /**
-     * Create embeddings for input text
-     */
-    create(request: EmbeddingsRequest): Promise<EmbeddingsResponse>;
-}
-/**
- * Models sub-client
- */
-declare class ModelsClient {
-    private client;
-    constructor(client: PrimeLLM);
-    /**
-     * List available models
-     */
-    list(): Promise<ModelsResponse>;
-}
-/**
- * Keys sub-client
- */
-declare class KeysClient {
-    private client;
-    constructor(client: PrimeLLM);
-    /**
-     * List API keys
-     */
-    list(): Promise<KeysResponse>;
-    /**
-     * Create a new API key
-     */
-    create(label?: string): Promise<KeyCreateResponse>;
-    /**
-     * Revoke an API key
-     */
-    revoke(keyId: number): Promise<{
-        ok: boolean;
-    }>;
-}
-/**
- * Credits sub-client
- */
-declare class CreditsClient {
-    private client;
-    constructor(client: PrimeLLM);
-    /**
-     * Get current credit balance
-     */
-    get(): Promise<CreditsResponse>;
-}
-/**
- * Tokens sub-client (utility)
- */
-declare class TokensClient {
-    /**
-     * Count tokens in text or messages
-     */
-    count(input: string | Message[]): number;
-    /**
-     * Set custom tokenizer adapter
-     */
-    setAdapter(adapter: ((text: string) => number) | null): void;
-}
-export { PrimeLLM as PrimeLLMClient };
-export default PrimeLLM;
+#!/usr/bin/env node
+export {};
 //# sourceMappingURL=index.d.ts.map
```
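In 1.0.1 the published type declarations collapse to the CLI's shebang line plus `export {};`. The `export {}` statement is standard TypeScript for marking a file as a module that exports nothing, so the package now presents an intentionally empty type surface. A sketch of the downstream effect (error wording approximate):

```ts
// These imports type-checked against 0.2.0's declarations; against the
// 1.0.1 declaration file (just `export {};`) both are compile errors.
import PrimeLLM from "primellm";
// error: Module '"primellm"' has no default export.

import { PrimeLLMClient } from "primellm";
// error: Module '"primellm"' has no exported member 'PrimeLLMClient'.
```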
package/dist/index.d.ts.map
CHANGED
```diff
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":""}
```
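The declaration map keeps `../src/index.ts` in its `sources` array, but its `mappings` string is now empty, so the map no longer carries any position data linking `dist/index.d.ts` back to the TypeScript source. A quick check against an installed copy (assuming a standard `node_modules` layout):

```ts
// Read the published declaration map and confirm it has no mappings.
import { readFileSync } from "node:fs";

const map = JSON.parse(
  readFileSync("node_modules/primellm/dist/index.d.ts.map", "utf8"),
);
console.log(map.sources);         // [ "../src/index.ts" ]
console.log(map.mappings === ""); // true in 1.0.1
```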