primellm 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +198 -0
- package/dist/index.d.ts +124 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +176 -0
- package/dist/types.d.ts +141 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +10 -0
- package/package.json +38 -0
package/README.md
ADDED
@@ -0,0 +1,198 @@
# PrimeLLM JavaScript SDK

Official JavaScript/TypeScript SDK for the PrimeLLM unified AI API.

PrimeLLM lets you access multiple AI models (GPT-5.1, Claude, Gemini) through a single, simple API. This SDK makes it easy to call PrimeLLM from JavaScript or TypeScript.

## Installation

### Local Development (Not on npm yet)

```bash
cd js-sdk
npm install
npm run build
```
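
Until the package is on npm, one way to use the local build is to import it by file path. A minimal sketch, assuming your script lives next to the `js-sdk` folder (the relative path is an assumption; adjust it to your layout):

```javascript
// Sketch: import the locally built SDK by path instead of from npm.
import PrimeLLMClient from "./js-sdk/dist/index.js";

const client = new PrimeLLMClient({ apiKey: "primellm_live_XXX" });

const response = await client.chat({
  model: "gpt-5.1",
  messages: [{ role: "user", content: "Hello from a local build!" }],
});

console.log(response.choices[0].message.content);
```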

### Future npm Usage (Coming Soon)

```bash
npm install primellm
```

## Quick Start

```javascript
import PrimeLLMClient from "primellm";

// Create a client with your API key
const client = new PrimeLLMClient({
  apiKey: "primellm_live_XXX", // Get from https://primellm.in/dashboard
});

// Send a chat message
const response = await client.chat({
  model: "gpt-5.1",
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What is TypeScript?" },
  ],
});

// Access the response
console.log(response.choices[0].message.content);
console.log("Tokens used:", response.usage.total_tokens);
console.log("Credits left:", response.credits.remaining);
```

## Available Models

| Model | Description |
|-------|-------------|
| `gpt-5.1` | Latest GPT model (default) |
| `claude-sonnet-4.5` | Claude Sonnet 4.5 |
| `gemini-3.0` | Gemini 3.0 |

## API Reference

### Creating a Client

```typescript
const client = new PrimeLLMClient({
  apiKey: "primellm_live_XXX", // Required
  baseURL: "https://api.primellm.in", // Optional, this is the default
  timeoutMs: 60000, // Optional, 60 seconds default
});
```

### client.chat(request)

Send a chat completion request to `/v1/chat`. This is the recommended method.

```javascript
const response = await client.chat({
  model: "gpt-5.1",
  messages: [
    { role: "user", content: "Hello!" }
  ],
  temperature: 0.7, // Optional
  max_tokens: 1000, // Optional
});
```

**Response:**
```javascript
{
  id: "chatcmpl_xxx",
  model: "gpt-5.1",
  choices: [{
    index: 0,
    message: { role: "assistant", content: "..." },
    finish_reason: "stop"
  }],
  usage: {
    prompt_tokens: 10,
    completion_tokens: 20,
    total_tokens: 30
  },
  credits: {
    remaining: 149.99,
    cost: 0.00006
  }
}
```

### client.completions(request)

Same as `chat()`, but uses the `/v1/chat/completions` endpoint. Use this for OpenAI API path compatibility.

```javascript
const response = await client.completions({
  model: "claude-sonnet-4.5",
  messages: [{ role: "user", content: "Hello!" }],
});
```

### client.generate(request)

Legacy endpoint using `/generate`. Returns a simpler response format.

```javascript
const response = await client.generate({
  model: "gpt-5.1",
  messages: [{ role: "user", content: "Hello!" }],
});

// Response format is different:
console.log(response.reply); // The AI's response
console.log(response.tokens_used); // Total tokens
console.log(response.credits_remaining); // Credits left
```

## Examples

Run the included examples:

```bash
cd js-sdk
npm install
npm run build

# Edit examples to add your API key, then:
node ./examples/chat-basic.mjs
node ./examples/completions-basic.mjs
node ./examples/generate-basic.mjs
```

## Understanding the Response

- **model**: Which AI model generated the response
- **choices**: The AI's reply; each choice holds a `message` with `role` and `content`
- **usage**: Token counts (how much "text" was processed)
  - `prompt_tokens`: Your input
  - `completion_tokens`: AI's output
  - `total_tokens`: Total
- **credits**: Your PrimeLLM account balance
  - `remaining`: Credits left
  - `cost`: Cost of this request
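
The `credits` field is typed as optional in the SDK's `ChatResponse`, so it is worth guarding before reading it. A minimal sketch of inspecting these fields, assuming `response` came from a `client.chat(...)` call as in the Quick Start:

```javascript
const { usage, credits } = response;

console.log(
  `Tokens - prompt: ${usage.prompt_tokens}, completion: ${usage.completion_tokens}, total: ${usage.total_tokens}`
);

// credits is optional in the type definitions, so check it before use
if (credits) {
  console.log(`Request cost: ${credits.cost ?? "n/a"}, credits remaining: ${credits.remaining}`);
}
```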

## TypeScript Support

This SDK is written in TypeScript and includes full type definitions.

```typescript
import { PrimeLLMClient, ChatRequest, ChatResponse } from "primellm";

const client = new PrimeLLMClient({ apiKey: "..." });

const request: ChatRequest = {
  model: "gpt-5.1",
  messages: [{ role: "user", content: "Hello!" }],
};

const response: ChatResponse = await client.chat(request);
```

## Error Handling

```javascript
try {
  const response = await client.chat({
    model: "gpt-5.1",
    messages: [{ role: "user", content: "Hello!" }],
  });
} catch (error) {
  console.error("API Error:", error.message);
  // Example: "PrimeLLM API error: 401 Unauthorized - Invalid API key"
}
```
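
Requests that exceed `timeoutMs` are aborted and surface through the same `catch` path, with a message noting the timeout. If you want to retry transient failures, a simple wrapper like the following works; this is only a sketch, not part of the SDK:

```javascript
// Sketch: retry a chat call a few times with a short backoff (not part of the SDK).
async function chatWithRetry(client, request, attempts = 3) {
  let lastError;
  for (let i = 0; i < attempts; i++) {
    try {
      return await client.chat(request);
    } catch (error) {
      lastError = error;
      // Wait 500 ms, 1000 ms, ... before the next attempt
      await new Promise((resolve) => setTimeout(resolve, 500 * (i + 1)));
    }
  }
  throw lastError;
}

const response = await chatWithRetry(client, {
  model: "gpt-5.1",
  messages: [{ role: "user", content: "Hello!" }],
});
```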

## Notes

- **Streaming**: Not yet supported. Calling `streamChat()` will throw an error (see the fallback sketch below).
- **Publishing**: This SDK will be published to npm as `primellm` in a future release.
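
Because the `stream*` methods currently throw as soon as you consume them, the practical pattern is to fall back to the regular call. A rough sketch, assuming a `client` created as in the Quick Start:

```javascript
let reply;
try {
  // Iterating the generator triggers the "not implemented yet" error in this version.
  for await (const chunk of client.streamChat({
    model: "gpt-5.1",
    messages: [{ role: "user", content: "Hello!" }],
  })) {
    reply = chunk.choices[0].message.content;
  }
} catch {
  // Fall back to the non-streaming endpoint.
  const response = await client.chat({
    model: "gpt-5.1",
    messages: [{ role: "user", content: "Hello!" }],
  });
  reply = response.choices[0].message.content;
}

console.log(reply);
```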

## License

MIT
package/dist/index.d.ts
ADDED
@@ -0,0 +1,124 @@
/**
 * PrimeLLM JavaScript SDK - Main Client
 *
 * This is the main SDK file. Developers import this to talk to PrimeLLM
 * from JavaScript or TypeScript.
 *
 * Example usage:
 *
 * import { PrimeLLMClient } from "primellm";
 *
 * const client = new PrimeLLMClient({ apiKey: "primellm_live_XXX" });
 *
 * const response = await client.chat({
 *   model: "gpt-5.1",
 *   messages: [{ role: "user", content: "Hello!" }],
 * });
 *
 * console.log(response.choices[0].message.content);
 */
import { ChatRequest, ChatResponse, GenerateRequest, GenerateResponse, PrimeLLMClientOptions } from "./types.js";
export * from "./types.js";
/**
 * PrimeLLM API Client
 *
 * This class handles all communication with the PrimeLLM API.
 * It provides methods for chat, completions, and the legacy generate endpoint.
 */
export declare class PrimeLLMClient {
    private apiKey;
    private baseURL;
    private timeoutMs;
    /**
     * Create a new PrimeLLM client.
     *
     * @param options - Configuration options
     * @param options.apiKey - Your PrimeLLM API key (required)
     * @param options.baseURL - API base URL (default: "https://api.primellm.in")
     * @param options.timeoutMs - Request timeout in ms (default: 60000)
     *
     * @example
     * const client = new PrimeLLMClient({
     *   apiKey: "primellm_live_XXX",
     * });
     */
    constructor(options: PrimeLLMClientOptions);
    /**
     * Internal helper to make API requests.
     * Handles authentication, JSON parsing, and error handling.
     */
    private request;
    /**
     * Send a chat completion request using /v1/chat endpoint.
     *
     * This is the recommended method for most use cases.
     * Returns an OpenAI-compatible response format.
     *
     * @param request - The chat request with model and messages
     * @returns The chat response with choices, usage, and credits
     *
     * @example
     * const response = await client.chat({
     *   model: "gpt-5.1",
     *   messages: [
     *     { role: "system", content: "You are a helpful assistant." },
     *     { role: "user", content: "What is TypeScript?" },
     *   ],
     * });
     * console.log(response.choices[0].message.content);
     */
    chat(request: ChatRequest): Promise<ChatResponse>;
    /**
     * Send a chat completion request using /v1/chat/completions endpoint.
     *
     * This is an alternative endpoint that also returns OpenAI-compatible format.
     * Use this if you need compatibility with OpenAI's exact endpoint path.
     *
     * @param request - The chat request with model and messages
     * @returns The chat response with choices, usage, and credits
     */
    completions(request: ChatRequest): Promise<ChatResponse>;
    /**
     * Send a request to the legacy /generate endpoint.
     *
     * This endpoint returns a different response format than chat().
     * Use chat() for new projects; this is for backwards compatibility.
     *
     * @param request - The generate request with model and messages
     * @returns The generate response with reply, tokens_used, cost
     *
     * @example
     * const response = await client.generate({
     *   model: "gpt-5.1",
     *   messages: [{ role: "user", content: "Hello!" }],
     * });
     * console.log(response.reply);
     */
    generate(request: GenerateRequest): Promise<GenerateResponse>;
    /**
     * Stream a chat completion response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    streamChat(_request: ChatRequest): AsyncGenerator<ChatResponse, void, unknown>;
    /**
     * Stream a completions response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    streamCompletions(_request: ChatRequest): AsyncGenerator<ChatResponse, void, unknown>;
    /**
     * Stream a generate response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    streamGenerate(_request: GenerateRequest): AsyncGenerator<GenerateResponse, void, unknown>;
}
export default PrimeLLMClient;
//# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;GAkBG;AAEH,OAAO,EACH,WAAW,EACX,YAAY,EACZ,eAAe,EACf,gBAAgB,EAChB,qBAAqB,EACxB,MAAM,YAAY,CAAC;AAGpB,cAAc,YAAY,CAAC;AAE3B;;;;;GAKG;AACH,qBAAa,cAAc;IACvB,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,OAAO,CAAS;IACxB,OAAO,CAAC,SAAS,CAAS;IAE1B;;;;;;;;;;;;OAYG;gBACS,OAAO,EAAE,qBAAqB;IAS1C;;;OAGG;YACW,OAAO;IAsCrB;;;;;;;;;;;;;;;;;;OAkBG;IACG,IAAI,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAIvD;;;;;;;;OAQG;IACG,WAAW,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAI9D;;;;;;;;;;;;;;;OAeG;IACG,QAAQ,CAAC,OAAO,EAAE,eAAe,GAAG,OAAO,CAAC,gBAAgB,CAAC;IAQnE;;;;;;OAMG;IACI,UAAU,CACb,QAAQ,EAAE,WAAW,GACtB,cAAc,CAAC,YAAY,EAAE,IAAI,EAAE,OAAO,CAAC;IAQ9C;;;;;;OAMG;IACI,iBAAiB,CACpB,QAAQ,EAAE,WAAW,GACtB,cAAc,CAAC,YAAY,EAAE,IAAI,EAAE,OAAO,CAAC;IAO9C;;;;;;OAMG;IACI,cAAc,CACjB,QAAQ,EAAE,eAAe,GAC1B,cAAc,CAAC,gBAAgB,EAAE,IAAI,EAAE,OAAO,CAAC;CAMrD;AAGD,eAAe,cAAc,CAAC"}
package/dist/index.js
ADDED
@@ -0,0 +1,176 @@
/**
 * PrimeLLM JavaScript SDK - Main Client
 *
 * This is the main SDK file. Developers import this to talk to PrimeLLM
 * from JavaScript or TypeScript.
 *
 * Example usage:
 *
 * import { PrimeLLMClient } from "primellm";
 *
 * const client = new PrimeLLMClient({ apiKey: "primellm_live_XXX" });
 *
 * const response = await client.chat({
 *   model: "gpt-5.1",
 *   messages: [{ role: "user", content: "Hello!" }],
 * });
 *
 * console.log(response.choices[0].message.content);
 */
// Re-export types for convenience
export * from "./types.js";
/**
 * PrimeLLM API Client
 *
 * This class handles all communication with the PrimeLLM API.
 * It provides methods for chat, completions, and the legacy generate endpoint.
 */
export class PrimeLLMClient {
    /**
     * Create a new PrimeLLM client.
     *
     * @param options - Configuration options
     * @param options.apiKey - Your PrimeLLM API key (required)
     * @param options.baseURL - API base URL (default: "https://api.primellm.in")
     * @param options.timeoutMs - Request timeout in ms (default: 60000)
     *
     * @example
     * const client = new PrimeLLMClient({
     *   apiKey: "primellm_live_XXX",
     * });
     */
    constructor(options) {
        if (!options.apiKey) {
            throw new Error("PrimeLLMClient: apiKey is required");
        }
        this.apiKey = options.apiKey;
        this.baseURL = (options.baseURL ?? "https://api.primellm.in").replace(/\/$/, "");
        this.timeoutMs = options.timeoutMs ?? 60000;
    }
    /**
     * Internal helper to make API requests.
     * Handles authentication, JSON parsing, and error handling.
     */
    async request(path, body, options) {
        const controller = new AbortController();
        const timeout = setTimeout(() => controller.abort(), this.timeoutMs);
        try {
            const res = await fetch(`${this.baseURL}${path}`, {
                method: options?.method ?? "POST",
                headers: {
                    "Authorization": `Bearer ${this.apiKey}`,
                    "Content-Type": "application/json",
                },
                body: JSON.stringify(body),
                signal: controller.signal,
            });
            if (!res.ok) {
                const text = await res.text().catch(() => "");
                throw new Error(`PrimeLLM API error: ${res.status} ${res.statusText} - ${text}`);
            }
            const json = await res.json();
            return json;
        }
        catch (error) {
            if (error instanceof Error && error.name === "AbortError") {
                throw new Error(`PrimeLLM API request timed out after ${this.timeoutMs}ms`);
            }
            throw error;
        }
        finally {
            clearTimeout(timeout);
        }
    }
    /**
     * Send a chat completion request using /v1/chat endpoint.
     *
     * This is the recommended method for most use cases.
     * Returns an OpenAI-compatible response format.
     *
     * @param request - The chat request with model and messages
     * @returns The chat response with choices, usage, and credits
     *
     * @example
     * const response = await client.chat({
     *   model: "gpt-5.1",
     *   messages: [
     *     { role: "system", content: "You are a helpful assistant." },
     *     { role: "user", content: "What is TypeScript?" },
     *   ],
     * });
     * console.log(response.choices[0].message.content);
     */
    async chat(request) {
        return this.request("/v1/chat", request);
    }
    /**
     * Send a chat completion request using /v1/chat/completions endpoint.
     *
     * This is an alternative endpoint that also returns OpenAI-compatible format.
     * Use this if you need compatibility with OpenAI's exact endpoint path.
     *
     * @param request - The chat request with model and messages
     * @returns The chat response with choices, usage, and credits
     */
    async completions(request) {
        return this.request("/v1/chat/completions", request);
    }
    /**
     * Send a request to the legacy /generate endpoint.
     *
     * This endpoint returns a different response format than chat().
     * Use chat() for new projects; this is for backwards compatibility.
     *
     * @param request - The generate request with model and messages
     * @returns The generate response with reply, tokens_used, cost
     *
     * @example
     * const response = await client.generate({
     *   model: "gpt-5.1",
     *   messages: [{ role: "user", content: "Hello!" }],
     * });
     * console.log(response.reply);
     */
    async generate(request) {
        return this.request("/generate", request);
    }
    // ============================================================
    // STREAMING METHODS (Not implemented yet)
    // ============================================================
    /**
     * Stream a chat completion response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    async *streamChat(_request) {
        throw new Error("streamChat is not implemented yet: backend streaming not supported in this SDK version.");
        // This yield is never reached but satisfies TypeScript
        yield undefined;
    }
    /**
     * Stream a completions response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    async *streamCompletions(_request) {
        throw new Error("streamCompletions is not implemented yet: backend streaming not supported in this SDK version.");
        yield undefined;
    }
    /**
     * Stream a generate response.
     *
     * ⚠️ NOT IMPLEMENTED YET - Backend streaming support coming soon.
     *
     * @throws Error always - streaming not supported in this version
     */
    async *streamGenerate(_request) {
        throw new Error("streamGenerate is not implemented yet: backend streaming not supported in this SDK version.");
        yield undefined;
    }
}
// Default export for convenience
export default PrimeLLMClient;
package/dist/types.d.ts
ADDED
@@ -0,0 +1,141 @@
/**
 * PrimeLLM SDK Types
 *
 * This file contains all the TypeScript types used by the PrimeLLM SDK.
 * These types match the response format from the PrimeLLM API.
 *
 * Think of types like "templates" that describe what data looks like.
 * They help catch errors before your code runs!
 */
/**
 * The role of a message in a conversation.
 * - "system": Instructions for the AI (like "be helpful")
 * - "user": Messages from the human user
 * - "assistant": Messages from the AI
 */
export type ChatRole = "system" | "user" | "assistant";
/**
 * A single message in a conversation.
 * Each message has a role (who said it) and content (what they said).
 */
export interface ChatMessage {
    role: ChatRole;
    content: string;
}
/**
 * Request body for the /v1/chat and /v1/chat/completions endpoints.
 * This is what you send TO the API.
 */
export interface ChatRequest {
    /** Model name, e.g. "gpt-5.1", "claude-sonnet-4.5", "gemini-3.0" */
    model: string;
    /** List of messages in the conversation */
    messages: ChatMessage[];
    /** Whether to stream the response (not supported yet) */
    stream?: boolean;
    /** Optional extra data to pass along */
    metadata?: Record<string, unknown>;
    /** Temperature for randomness (0.0 = focused, 1.0 = creative) */
    temperature?: number;
    /** Maximum tokens to generate */
    max_tokens?: number;
}
/**
 * Request body for the /generate endpoint (legacy).
 * This endpoint uses a simpler "prompt" format instead of messages.
 */
export interface GenerateRequest {
    /** Model name, e.g. "gpt-5.1" */
    model: string;
    /** The messages to send (same as ChatRequest) */
    messages: ChatMessage[];
    /** Maximum tokens to generate */
    max_tokens?: number;
    /** Temperature for randomness */
    temperature?: number;
    /** Whether to stream (not supported yet) */
    stream?: boolean;
    /** Optional extra data */
    metadata?: Record<string, unknown>;
}
/**
 * A single "choice" in the API response.
 * The API can return multiple choices, but usually returns just one.
 */
export interface ChatChoice {
    /** Index of this choice (usually 0) */
    index: number;
    /** The AI's response message */
    message: ChatMessage;
    /** Why the AI stopped: "stop" means it finished normally */
    finish_reason?: string | null;
}
/**
 * Token usage information.
 * Tokens are like "word pieces" - the AI counts usage in tokens.
 */
export interface Usage {
    /** Tokens used by your input (prompt) */
    prompt_tokens: number;
    /** Tokens used by the AI's response */
    completion_tokens: number;
    /** Total tokens = prompt + completion */
    total_tokens: number;
}
/**
 * Credit information from your PrimeLLM account.
 * Credits are like "money" - each API call costs some credits.
 */
export interface CreditsInfo {
    /** How many credits you have left */
    remaining: number;
    /** How much this request cost */
    cost?: number;
}
/**
 * The full response from /v1/chat or /v1/chat/completions.
 * This matches the OpenAI response format.
 */
export interface ChatResponse {
    /** Unique ID for this response */
    id: string;
    /** Which model was used */
    model: string;
    /** When this was created (Unix timestamp in seconds) */
    created: number;
    /** Type of object (always "chat.completion") */
    object?: string;
    /** The AI's response(s) */
    choices: ChatChoice[];
    /** Token usage information */
    usage: Usage;
    /** Your credit balance (PrimeLLM-specific) */
    credits?: CreditsInfo;
}
/**
 * Response from the /generate endpoint (legacy format).
 */
export interface GenerateResponse {
    /** The AI's reply text */
    reply: string;
    /** Which model was used */
    model: string;
    /** Total tokens used */
    tokens_used: number;
    /** Cost of this request */
    cost: number;
    /** Credits remaining in your account */
    credits_remaining: number;
}
/**
 * Options for creating a PrimeLLMClient.
 */
export interface PrimeLLMClientOptions {
    /** Your PrimeLLM API key (starts with "primellm_live_") */
    apiKey: string;
    /** Base URL for the API (default: "https://api.primellm.in") */
    baseURL?: string;
    /** Request timeout in milliseconds (default: 60000 = 60 seconds) */
    timeoutMs?: number;
}
//# sourceMappingURL=types.d.ts.map
package/dist/types.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAMH;;;;;GAKG;AACH,MAAM,MAAM,QAAQ,GAAG,QAAQ,GAAG,MAAM,GAAG,WAAW,CAAC;AAEvD;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;CACnB;AAMD;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,oEAAoE;IACpE,KAAK,EAAE,MAAM,CAAC;IAEd,2CAA2C;IAC3C,QAAQ,EAAE,WAAW,EAAE,CAAC;IAExB,yDAAyD;IACzD,MAAM,CAAC,EAAE,OAAO,CAAC;IAEjB,wCAAwC;IACxC,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAEnC,iEAAiE;IACjE,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB,iCAAiC;IACjC,UAAU,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;;GAGG;AACH,MAAM,WAAW,eAAe;IAC5B,iCAAiC;IACjC,KAAK,EAAE,MAAM,CAAC;IAEd,iDAAiD;IACjD,QAAQ,EAAE,WAAW,EAAE,CAAC;IAExB,iCAAiC;IACjC,UAAU,CAAC,EAAE,MAAM,CAAC;IAEpB,iCAAiC;IACjC,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB,4CAA4C;IAC5C,MAAM,CAAC,EAAE,OAAO,CAAC;IAEjB,0BAA0B;IAC1B,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACtC;AAMD;;;GAGG;AACH,MAAM,WAAW,UAAU;IACvB,uCAAuC;IACvC,KAAK,EAAE,MAAM,CAAC;IAEd,gCAAgC;IAChC,OAAO,EAAE,WAAW,CAAC;IAErB,4DAA4D;IAC5D,aAAa,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;CACjC;AAED;;;GAGG;AACH,MAAM,WAAW,KAAK;IAClB,yCAAyC;IACzC,aAAa,EAAE,MAAM,CAAC;IAEtB,uCAAuC;IACvC,iBAAiB,EAAE,MAAM,CAAC;IAE1B,yCAAyC;IACzC,YAAY,EAAE,MAAM,CAAC;CACxB;AAED;;;GAGG;AACH,MAAM,WAAW,WAAW;IACxB,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAC;IAElB,iCAAiC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC;CACjB;AAED;;;GAGG;AACH,MAAM,WAAW,YAAY;IACzB,kCAAkC;IAClC,EAAE,EAAE,MAAM,CAAC;IAEX,2BAA2B;IAC3B,KAAK,EAAE,MAAM,CAAC;IAEd,wDAAwD;IACxD,OAAO,EAAE,MAAM,CAAC;IAEhB,gDAAgD;IAChD,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB,2BAA2B;IAC3B,OAAO,EAAE,UAAU,EAAE,CAAC;IAEtB,8BAA8B;IAC9B,KAAK,EAAE,KAAK,CAAC;IAEb,8CAA8C;IAC9C,OAAO,CAAC,EAAE,WAAW,CAAC;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC7B,0BAA0B;IAC1B,KAAK,EAAE,MAAM,CAAC;IAEd,2BAA2B;IAC3B,KAAK,EAAE,MAAM,CAAC;IAEd,wBAAwB;IACxB,WAAW,EAAE,MAAM,CAAC;IAEpB,2BAA2B;IAC3B,IAAI,EAAE,MAAM,CAAC;IAEb,wCAAwC;IACxC,iBAAiB,EAAE,MAAM,CAAC;CAC7B;AAMD;;GAEG;AACH,MAAM,WAAW,qBAAqB;IAClC,2DAA2D;IAC3D,MAAM,EAAE,MAAM,CAAC;IAEf,gEAAgE;IAChE,OAAO,CAAC,EAAE,MAAM,CAAC;IAEjB,oEAAoE;IACpE,SAAS,CAAC,EAAE,MAAM,CAAC;CACtB"}
package/dist/types.js
ADDED
@@ -0,0 +1,10 @@
/**
 * PrimeLLM SDK Types
 *
 * This file contains all the TypeScript types used by the PrimeLLM SDK.
 * These types match the response format from the PrimeLLM API.
 *
 * Think of types like "templates" that describe what data looks like.
 * They help catch errors before your code runs!
 */
export {};
package/package.json
ADDED
@@ -0,0 +1,38 @@
{
  "name": "primellm",
  "version": "0.1.0",
  "description": "Official JavaScript SDK for PrimeLLM (gpt-5.1, Claude-style AI).",
  "main": "dist/index.js",
  "module": "dist/index.mjs",
  "types": "dist/index.d.ts",
  "type": "module",
  "keywords": [
    "ai",
    "llm",
    "chat",
    "primellm",
    "gpt-5.1",
    "claude",
    "gemini",
    "openai",
    "sdk"
  ],
  "author": "PrimeLLM",
  "license": "MIT",
  "scripts": {
    "build": "tsc -p tsconfig.json",
    "lint": "echo \"no lint configured yet\"",
    "test": "node ./examples/chat-basic.mjs"
  },
  "devDependencies": {
    "typescript": "^5.6.0"
  },
  "engines": {
    "node": ">=18.0.0"
  },
  "files": [
    "dist",
    "README.md"
  ],
  "homepage": "https://primellm.in"
}