primellm 0.1.0 → 0.2.0
- package/README.md +9 -80
- package/cli/index.js +34 -0
- package/dist/errors.d.ts +59 -0
- package/dist/errors.d.ts.map +1 -0
- package/dist/errors.js +93 -0
- package/dist/index.d.ts +108 -84
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +211 -108
- package/dist/streaming.d.ts +29 -0
- package/dist/streaming.d.ts.map +1 -0
- package/dist/streaming.js +64 -0
- package/dist/tokenizer.d.ts +42 -0
- package/dist/tokenizer.d.ts.map +1 -0
- package/dist/tokenizer.js +61 -0
- package/dist/types.d.ts +93 -62
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +0 -3
- package/package.json +10 -3
package/dist/types.d.ts
CHANGED
@@ -3,139 +3,170 @@
  *
  * This file contains all the TypeScript types used by the PrimeLLM SDK.
  * These types match the response format from the PrimeLLM API.
- *
- * Think of types like "templates" that describe what data looks like.
- * They help catch errors before your code runs!
  */
 /**
  * The role of a message in a conversation.
- * - "system": Instructions for the AI (like "be helpful")
- * - "user": Messages from the human user
- * - "assistant": Messages from the AI
  */
 export type ChatRole = "system" | "user" | "assistant";
 /**
  * A single message in a conversation.
- * Each message has a role (who said it) and content (what they said).
  */
 export interface ChatMessage {
     role: ChatRole;
     content: string;
 }
+/** Alias for ChatMessage */
+export type Message = ChatMessage;
 /**
- * Request body for the /v1/chat
- * This is what you send TO the API.
+ * Request body for the /v1/chat endpoint.
  */
 export interface ChatRequest {
-    /** Model name, e.g. "gpt-5.1", "claude-sonnet-4.5", "gemini-3.0" */
     model: string;
-    /** List of messages in the conversation */
     messages: ChatMessage[];
-    /** Whether to stream the response (not supported yet) */
     stream?: boolean;
-    /** Optional extra data to pass along */
     metadata?: Record<string, unknown>;
-    /** Temperature for randomness (0.0 = focused, 1.0 = creative) */
     temperature?: number;
-    /** Maximum tokens to generate */
     max_tokens?: number;
 }
 /**
  * Request body for the /generate endpoint (legacy).
- * This endpoint uses a simpler "prompt" format instead of messages.
  */
 export interface GenerateRequest {
-    /** Model name, e.g. "gpt-5.1" */
     model: string;
-    /** The messages to send (same as ChatRequest) */
     messages: ChatMessage[];
-    /** Maximum tokens to generate */
     max_tokens?: number;
-    /** Temperature for randomness */
     temperature?: number;
-    /** Whether to stream (not supported yet) */
     stream?: boolean;
-    /** Optional extra data */
     metadata?: Record<string, unknown>;
 }
 /**
- *
- * The API can return multiple choices, but usually returns just one.
+ * Request body for embeddings
  */
+export interface EmbeddingsRequest {
+    model?: string;
+    input: string | string[];
+}
 export interface ChatChoice {
-    /** Index of this choice (usually 0) */
     index: number;
-    /** The AI's response message */
     message: ChatMessage;
-    /** Why the AI stopped: "stop" means it finished normally */
     finish_reason?: string | null;
 }
-/**
- * Token usage information.
- * Tokens are like "word pieces" - the AI counts usage in tokens.
- */
 export interface Usage {
-    /** Tokens used by your input (prompt) */
     prompt_tokens: number;
-    /** Tokens used by the AI's response */
     completion_tokens: number;
-    /** Total tokens = prompt + completion */
     total_tokens: number;
 }
-/**
- * Credit information from your PrimeLLM account.
- * Credits are like "money" - each API call costs some credits.
- */
 export interface CreditsInfo {
-    /** How many credits you have left */
     remaining: number;
-    /** How much this request cost */
     cost?: number;
 }
-/**
- * The full response from /v1/chat or /v1/chat/completions.
- * This matches the OpenAI response format.
- */
 export interface ChatResponse {
-    /** Unique ID for this response */
     id: string;
-    /** Which model was used */
     model: string;
-    /** When this was created (Unix timestamp in seconds) */
     created: number;
-    /** Type of object (always "chat.completion") */
     object?: string;
-    /** The AI's response(s) */
     choices: ChatChoice[];
-    /** Token usage information */
     usage: Usage;
-    /** Your credit balance (PrimeLLM-specific) */
     credits?: CreditsInfo;
 }
-/**
- * Response from the /generate endpoint (legacy format).
- */
 export interface GenerateResponse {
-    /** The AI's reply text */
     reply: string;
-    /** Which model was used */
     model: string;
-    /** Total tokens used */
     tokens_used: number;
-    /** Cost of this request */
     cost: number;
-    /** Credits remaining in your account */
     credits_remaining: number;
 }
 /**
- *
+ * Embedding data item
+ */
+export interface EmbeddingData {
+    object: string;
+    embedding: number[];
+    index: number;
+}
+/**
+ * Embeddings response
+ */
+export interface EmbeddingsResponse {
+    object: string;
+    data: EmbeddingData[];
+    model: string;
+    usage: {
+        prompt_tokens: number;
+        total_tokens: number;
+    };
+    cost?: number;
+    credits_remaining?: number;
+}
+/**
+ * Model info
+ */
+export interface ModelInfo {
+    id: string;
+    object: string;
+    owned_by: string;
+    label: string;
+    description: string;
+    pricing: {
+        per_1k_tokens: number;
+        currency: string;
+    };
+    capabilities: string[];
+}
+/**
+ * Models list response
+ */
+export interface ModelsResponse {
+    object: string;
+    data: ModelInfo[];
+}
+/**
+ * Credits response
+ */
+export interface CreditsResponse {
+    credits: number;
+    currency: string;
+    user_id?: number;
+}
+/**
+ * API Key info
+ */
+export interface KeyInfo {
+    id: number;
+    key_prefix: string;
+    created_at: number;
+    revoked: boolean;
+    active: boolean;
+}
+/**
+ * Keys list response
+ */
+export interface KeysResponse {
+    object: string;
+    data: KeyInfo[];
+    max_allowed: number;
+}
+/**
+ * Key create response
+ */
+export interface KeyCreateResponse {
+    id: number;
+    key: string;
+    created_at: number;
+    label?: string;
+    message: string;
+}
+/**
+ * Options for creating a PrimeLLM client.
  */
 export interface PrimeLLMClientOptions {
-    /** Your PrimeLLM API key
+    /** Your PrimeLLM API key */
     apiKey: string;
     /** Base URL for the API (default: "https://api.primellm.in") */
     baseURL?: string;
-    /** Request timeout in milliseconds (default: 60000
+    /** Request timeout in milliseconds (default: 60000) */
     timeoutMs?: number;
+    /** Max retry attempts for failed requests (default: 3) */
+    maxRetries?: number;
 }
 //# sourceMappingURL=types.d.ts.map
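The 0.2.0 typings above add embeddings, model-listing, credits, and API-key management types alongside the existing chat surface, and PrimeLLMClientOptions gains a maxRetries knob. A minimal, type-only usage sketch follows; it assumes the package root re-exports these declarations (dist/index.d.ts also changed in this release), and the model ids and key value are illustrative placeholders, not confirmed by this diff:

// Type-only sketch of the new 0.2.0 declarations; compiles without
// touching the network. Assumes "primellm" re-exports them from its root.
import type {
  ChatRequest,
  EmbeddingsRequest,
  PrimeLLMClientOptions,
} from "primellm";

const options: PrimeLLMClientOptions = {
  apiKey: "pk-...",  // placeholder key
  timeoutMs: 60000,  // documented default
  maxRetries: 3,     // new in 0.2.0, documented default
};

const chat: ChatRequest = {
  model: "gpt-5.1",
  messages: [{ role: "user", content: "Hello!" }],
  max_tokens: 128,
};

// EmbeddingsRequest.input accepts a single string or a batch.
const embeddings: EmbeddingsRequest = {
  model: "text-embed-1",  // hypothetical model id
  input: ["first text", "second text"],
};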
package/dist/types.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA
+{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAMH;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG,QAAQ,GAAG,MAAM,GAAG,WAAW,CAAC;AAEvD;;GAEG;AACH,MAAM,WAAW,WAAW;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;CACnB;AAED,4BAA4B;AAC5B,MAAM,MAAM,OAAO,GAAG,WAAW,CAAC;AAMlC;;GAEG;AACH,MAAM,WAAW,WAAW;IACxB,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,WAAW,EAAE,CAAC;IACxB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACnC,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,UAAU,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC5B,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,WAAW,EAAE,CAAC;IACxB,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACtC;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAC9B,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC5B;AAMD,MAAM,WAAW,UAAU;IACvB,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,WAAW,CAAC;IACrB,aAAa,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;CACjC;AAED,MAAM,WAAW,KAAK;IAClB,aAAa,EAAE,MAAM,CAAC;IACtB,iBAAiB,EAAE,MAAM,CAAC;IAC1B,YAAY,EAAE,MAAM,CAAC;CACxB;AAED,MAAM,WAAW,WAAW;IACxB,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,YAAY;IACzB,EAAE,EAAE,MAAM,CAAC;IACX,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,EAAE,UAAU,EAAE,CAAC;IACtB,KAAK,EAAE,KAAK,CAAC;IACb,OAAO,CAAC,EAAE,WAAW,CAAC;CACzB;AAED,MAAM,WAAW,gBAAgB;IAC7B,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;IACd,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,iBAAiB,EAAE,MAAM,CAAC;CAC7B;AAED;;GAEG;AACH,MAAM,WAAW,aAAa;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,EAAE,MAAM,EAAE,CAAC;IACpB,KAAK,EAAE,MAAM,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,kBAAkB;IAC/B,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,aAAa,EAAE,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE;QACH,aAAa,EAAE,MAAM,CAAC;QACtB,YAAY,EAAE,MAAM,CAAC;KACxB,CAAC;IACF,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,iBAAiB,CAAC,EAAE,MAAM,CAAC;CAC9B;AAED;;GAEG;AACH,MAAM,WAAW,SAAS;IACtB,EAAE,EAAE,MAAM,CAAC;IACX,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,EAAE,MAAM,CAAC;IACd,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,EAAE;QACL,aAAa,EAAE,MAAM,CAAC;QACtB,QAAQ,EAAE,MAAM,CAAC;KACpB,CAAC;IACF,YAAY,EAAE,MAAM,EAAE,CAAC;CAC1B;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC3B,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,SAAS,EAAE,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC5B,OAAO,EAAE,MAAM,CAAC;IAChB,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,OAAO;IACpB,EAAE,EAAE,MAAM,CAAC;IACX,UAAU,EAAE,MAAM,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,EAAE,OAAO,CAAC;IACjB,MAAM,EAAE,OAAO,CAAC;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IACzB,MAAM,EAAE,MAAM,CAAC;IACf,IAAI,EAAE,OAAO,EAAE,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;CACvB;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAC9B,EAAE,EAAE,MAAM,CAAC;IACX,GAAG,EAAE,MAAM,CAAC;IACZ,UAAU,EAAE,MAAM,CAAC;IACnB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;CACnB;AAMD;;GAEG;AACH,MAAM,WAAW,qBAAqB;IAClC,4BAA4B;IAC5B,MAAM,EAAE,MAAM,CAAC;IAEf,gEAAgE;IAChE,OAAO,CAAC,EAAE,MAAM,CAAC;IAEjB,uDAAuD;IACvD,SAAS,CAAC,EAAE,MAAM,CAAC;IAEnB,0DAA0D;IAC1D,UAAU,CAAC,EAAE,MAAM,CAAC;CACvB"}
package/dist/types.js
CHANGED
@@ -3,8 +3,5 @@
  *
  * This file contains all the TypeScript types used by the PrimeLLM SDK.
  * These types match the response format from the PrimeLLM API.
- *
- * Think of types like "templates" that describe what data looks like.
- * They help catch errors before your code runs!
  */
 export {};
package/package.json
CHANGED
@@ -1,11 +1,14 @@
 {
   "name": "primellm",
-  "version": "0.1.0",
+  "version": "0.2.0",
   "description": "Official JavaScript SDK for PrimeLLM (gpt-5.1, Claude-style AI).",
   "main": "dist/index.js",
   "module": "dist/index.mjs",
   "types": "dist/index.d.ts",
   "type": "module",
+  "bin": {
+    "primellm": "./cli/index.js"
+  },
   "keywords": [
     "ai",
     "llm",
@@ -15,14 +18,17 @@
     "claude",
     "gemini",
     "openai",
-    "sdk"
+    "sdk",
+    "cli",
+    "streaming"
   ],
   "author": "PrimeLLM",
   "license": "MIT",
   "scripts": {
     "build": "tsc -p tsconfig.json",
     "lint": "echo \"no lint configured yet\"",
-    "test": "node ./examples/chat-basic.mjs"
+    "test": "node ./examples/chat-basic.mjs",
+    "test:smoke": "node ./test/smoke.mjs"
   },
   "devDependencies": {
     "typescript": "^5.6.0"
@@ -32,6 +38,7 @@
   },
   "files": [
     "dist",
+    "cli",
     "README.md"
   ],
   "homepage": "https://primellm.in"
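The manifest changes wire up the new CLI: the bin map tells npm to link a primellm command to ./cli/index.js on install, and adding "cli" to the files whitelist ensures that directory actually ships in the tarball. A quick sketch for checking the wiring from a project that installed the package; it assumes the manifest has no "exports" field (none appears in this diff), so the package.json subpath stays requirable:

// Inspect the installed manifest; createRequire is used because the
// consuming file is an ES module.
import { createRequire } from "node:module";

const require = createRequire(import.meta.url);
const pkg = require("primellm/package.json");

console.log(pkg.version); // "0.2.0"
console.log(pkg.bin);     // { primellm: "./cli/index.js" }

After installation, npx primellm (or a global install) resolves the command through that bin entry.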