xsai 0.0.2 → 0.0.3
- package/README.md +40 -0
- package/dist/index.d.ts +32 -8
- package/dist/index.js +20 -1
- package/package.json +2 -1
package/README.md
CHANGED

```diff
@@ -2,6 +2,46 @@
 
 Extra-small AI SDK for any OpenAI-compatible API.
 
+<!-- automd:badges color="lime" license bundlephobia -->
+
+[](https://npmjs.com/package/xsai)
+[](https://npm.chart.dev/xsai)
+[](https://bundlephobia.com/package/xsai)
+[](https://github.com/moeru-ai/xsai/blob/main/LICENSE)
+
+<!-- /automd -->
+
+## Why?
+
+I'm working on a tiny local-LLM translator - [ARPK](https://github.com/moeru-ai/arpk) - which is currently (v0.2.4) 449KB, of which 26% is `ollama-js` (and another 13% is the pointless `whatwg-fetch` that comes with `ollama-js`!).
+
+I wanted to make every byte count, so I started writing a lightweight library - xsAI.
+
+It provides an interface similar to the Vercel AI SDK, is ESM-only, and has zero dependencies for a minimal installation size.
+
+## Install
+
+<!-- automd:pm-install auto=false -->
+
+```sh
+# npm
+npm install xsai
+
+# yarn
+yarn add xsai
+
+# pnpm
+pnpm install xsai
+
+# bun
+bun install xsai
+
+# deno
+deno install xsai
+```
+
+<!-- /automd -->
+
 ## License
 
 [MIT](LICENSE.md)
```
package/dist/index.d.ts
CHANGED

```diff
@@ -1,9 +1,5 @@
-interface Message {
-  content: string;
-  role: 'assistant' | 'system' | 'user' | ({} & string);
-}
-
-type Model = 'gemma2' | 'llama3.1' | 'llama3.2' | 'llama3.2-vision' | 'mistral-nemo' | 'mistral-small' | 'nemotron' | 'qwen2.5' | 'qwen2.5-coder' | ({} & string);
+type GenerationModel = 'gemma2' | 'llama3.1' | 'llama3.2' | 'llama3.2-vision' | 'mistral-nemo' | 'mistral-small' | 'nemotron' | 'qwen2.5' | 'qwen2.5-coder' | ({} & string);
+type EmbedModel = 'all-minilm' | 'mxbai-embed-large' | 'nomic-embed-text' | ({} & string);
 
 interface CommonRequestOptions {
   /**
@@ -12,9 +8,37 @@ interface CommonRequestOptions {
   base?: string;
 }
 
+interface EmbedOptions extends CommonRequestOptions {
+  input: string | string[];
+  model: EmbedModel;
+  /** @default `embeddings` */
+  path?: 'embeddings' | ({} & string);
+}
+interface EmbedResponseUsage {
+  prompt_tokens: number;
+  total_tokens: number;
+}
+interface EmbedResult {
+  embedding: number[];
+  request: Request;
+  response: Response;
+  usage?: EmbedResponseUsage;
+}
+declare const embed: (options: EmbedOptions) => Promise<{
+  embedding: number[];
+  request: Request;
+  response: Response;
+  usage: EmbedResponseUsage;
+}>;
+
+interface Message {
+  content: string;
+  role: 'assistant' | 'system' | 'user' | ({} & string);
+}
+
 interface GenerateTextOptions extends CommonRequestOptions {
   messages?: Message[];
-  model: Model;
+  model: GenerationModel;
   /** @default `completions` */
   path?: 'completions' | ({} & string);
   prompt: string;
@@ -26,4 +50,4 @@ interface GenerateTextResult {
 }
 declare const generateText: (options: GenerateTextOptions) => Promise<GenerateTextResult>;
 
-export { generateText };
+export { type EmbedOptions, type EmbedResponseUsage, type EmbedResult, type GenerateTextOptions, type GenerateTextResult, embed, generateText };
```
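The headline addition in 0.0.3 is the new `embed` export. A minimal usage sketch against these typings (the input string is illustrative, and `base` is omitted so the call falls back to the default Ollama endpoint defined in `dist/index.js`):

```ts
import { embed } from 'xsai'

// `input` accepts string | string[]; `model` is one of the EmbedModel
// names, or any other string via the `({} & string)` escape hatch.
const { embedding, usage } = await embed({
  input: 'Hello, world!',
  model: 'nomic-embed-text',
})

console.log(embedding.length, usage.total_tokens)
```

Worth noting: `EmbedResult` marks `usage` as optional, while the resolved return type of `embed` requires it; the implementation below always forwards `json.usage`, so only the published types disagree.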
package/dist/index.js
CHANGED

```diff
@@ -2,6 +2,25 @@ const clean = (record) => Object.fromEntries(Object.entries(record).filter(([, v
 
 const base = "http://localhost:11434/v1/";
 
+const embed = async (options) => {
+  const request = new Request(new URL(options.path ?? "embeddings", options.base ?? base), {
+    body: JSON.stringify(clean({
+      ...options,
+      base: void 0,
+      path: void 0
+    })),
+    method: "POST"
+  });
+  const response = await fetch(request);
+  const json = await response.json();
+  return {
+    embedding: json.data[0].embedding,
+    request,
+    response,
+    usage: json.usage
+  };
+};
+
 const generateText = async (options) => {
   const request = new Request(new URL(options.path ?? "completions", options.base ?? base), {
     body: JSON.stringify(clean({
@@ -21,4 +40,4 @@ const generateText = async (options) => {
   };
 };
 
-export { generateText };
+export { embed, generateText };
```
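A subtlety in how these requests are built: `embed` and `generateText` both resolve their endpoint with `new URL(path, base)`, and `clean` strips the `base`/`path` keys (set to `void 0`) out of the JSON body before it is sent. The trailing slash on the default `base` is load-bearing under WHATWG URL resolution; a quick sketch (the slash-less base below is hypothetical, shown only to illustrate the failure mode):

```ts
// Resolution as performed by embed() and generateText():
console.log(new URL('embeddings', 'http://localhost:11434/v1/').href)
// -> http://localhost:11434/v1/embeddings

// Without the trailing slash, the last path segment is replaced:
console.log(new URL('embeddings', 'http://localhost:11434/v1').href)
// -> http://localhost:11434/embeddings
```

Callers overriding `base` via `CommonRequestOptions` should keep the trailing slash for the same reason.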
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "xsai",
-  "version": "0.0.2",
+  "version": "0.0.3",
   "type": "module",
   "author": "藍+85CD",
   "license": "MIT",
@@ -30,6 +30,7 @@
     "@importantimport/tsconfig": "^0.1.1",
     "@types/eslint": "^9.6.1",
     "@types/node": "^22.9.1",
+    "automd": "^0.3.12",
     "bumpp": "^9.8.1",
     "eslint": "^9.15.0",
     "jiti": "^2.4.0",
```