@reverbia/sdk 1.0.0-next.20251126182814 → 1.0.0-next.20251127132400
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +186 -45
- package/dist/index.cjs +2 -1
- package/dist/index.mjs +2 -1
- package/dist/react/index.cjs +7 -5
- package/dist/react/index.mjs +7 -5
- package/package.json +1 -1
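Aside from the README rewrite and the version bump, the functional change in this release is that the generated client config now ships a hard-coded default base URL, applied as a destructuring fallback throughout the React entry point (see the `dist` hunks below). A minimal sketch of that pattern; only `BASE_URL` and its value come from the diff, the rest is illustrative:

```typescript
// Sketch of the fallback pattern the dist hunks introduce: a module-level
// constant that option destructuring falls back to when the caller omits
// `baseUrl`.
const BASE_URL = "https://ai-portal-dev.zetachain.com";

interface Options {
  baseUrl?: string;
}

function resolveBaseUrl(options: Options = {}): string {
  const { baseUrl = BASE_URL } = options; // same shape as the bundled code
  return baseUrl;
}

console.log(resolveBaseUrl()); // "https://ai-portal-dev.zetachain.com"
console.log(resolveBaseUrl({ baseUrl: "http://localhost:3000" })); // override wins
```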
package/README.md
CHANGED

@@ -1,45 +1,186 @@
-#
-
-A TypeScript SDK
-
-
-
-##
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
--
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# @reverbia/sdk
+
+A TypeScript SDK that empowers developers to build AI-powered applications. It
+enables you to send prompts to LLMs with streaming support, manage long-term
+memories, and encrypt sensitive data, all without needing your own LLM API key.
+
+## Installation
+
+```bash
+pnpm install @reverbia/sdk@next
+```
+
+> **Note:** Currently, the SDK is pre-release so all new versions are released
+> under the `next` tag (released on every merge to the `main` branch). Check out
+> npm to see the latest version.
+
+## Configuration
+
+To use the SDK, you'll need to configure your Privy provider and API URL.
+
+```env
+PRIVY_APP_ID=cmhwlx82v000xle0cde4rjy5y
+API_URL=https://ai-portal-dev.zetachain.com
+```
+
+## Authentication
+
+The SDK currently only supports authentication via [Privy](https://privy.io) and
+expects a Privy identity token.
+
+```typescript
+import { useIdentityToken } from "@privy-io/react-auth";
+
+const { identityToken } = useIdentityToken();
+```
+
+## Usage
+
+For an example of how to use this functionality check out [the example
+repo](https://github.com/zeta-chain/ai-examples).
+
+### useChat
+
+The `useChat` hook provides a convenient way to send chat messages to the LLM
+API with automatic token management and loading state handling.
+
+```typescript
+import { useChat } from "@reverbia/sdk/react";
+```
+
+```typescript
+const { sendMessage, isLoading, stop } = useChat({
+  getToken: async () => identityToken || null,
+  onFinish: (response) => {
+    console.log("Chat finished:", response);
+  },
+  onError: (error) => {
+    console.error("Chat error:", error);
+  },
+  onData: (chunk) => {
+    console.log("Received chunk:", chunk);
+  },
+});
+
+const handleSend = async () => {
+  const result = await sendMessage({
+    messages: [{ role: "user", content: "Hello!" }],
+    model: "gpt-4o-mini",
+  });
+
+  if (result.error) {
+    console.error("Error:", result.error);
+  } else {
+    console.log("Response:", result.data);
+  }
+};
+```
+
+### useMemory
+
+The `useMemory` hook allows you to extract facts/memories from messages and
+search through stored memories (in IndexedDB) using semantic search.
+
+How it works:
+
+1. **Fact Extraction:** When prompts are sent to the LLM, they are analyzed for
+   relevant facts. If found, these facts are extracted and converted into vector
+   embeddings.
+2. **Storage:** Extracted memories and their embeddings are stored locally in
+   IndexedDB.
+3. **Retrieval:** New prompts are converted into embedding vectors and compared
+   against stored memories. Relevant memories are then retrieved and used as
+   context for the LLM interaction.
+
+```typescript
+import { useMemory } from "@reverbia/sdk/react";
+```
+
+```typescript
+const { extractMemoriesFromMessage, searchMemories } = useMemory({
+  getToken: async () => identityToken || null,
+  embeddingModel: "openai/text-embedding-3-small",
+});
+
+const handleExtract = async () => {
+  await extractMemoriesFromMessage({
+    messages: [
+      { role: "user", content: "My favorite color is blue" },
+      {
+        role: "assistant",
+        content: "I will remember that your favorite color is blue.",
+      },
+    ],
+    model: "gpt-4o",
+  });
+};
+
+const handleSearch = async () => {
+  const memories = await searchMemories("What is my favorite color?");
+  console.log(memories);
+};
+```
+
+### useEncryption
+
+The `useEncryption` hook and utilities help you encrypt and decrypt local data
+using a key derived from a wallet signature (requires `@privy-io/react-auth`).
+
+```typescript
+import { usePrivy } from "@privy-io/react-auth";
+import { useEncryption, encryptData, decryptData } from "@reverbia/sdk/react";
+```
+
+```typescript
+const { authenticated } = usePrivy();
+
+// Initialize encryption (requests signature if key not present)
+// Pass true when user is authenticated with wallet
+useEncryption(authenticated);
+
+// Encrypt data
+const saveSecret = async (text: string) => {
+  const encrypted = await encryptData(text);
+  localStorage.setItem("secret", encrypted);
+};
+
+// Decrypt data
+const loadSecret = async () => {
+  const encrypted = localStorage.getItem("secret");
+  if (encrypted) {
+    const decrypted = await decryptData(encrypted);
+    console.log(decrypted);
+  }
+};
+```
+
+### Direct API Access
+
+You can also make requests to SDK functions directly without using the React
+hooks.
+
+```typescript
+import { postApiV1ChatCompletions } from "@reverbia/sdk";
+
+const response = await postApiV1ChatCompletions({
+  body: {
+    messages: [{ role: "user", content: "Tell me a joke" }],
+    model: "gpt-4o-mini",
+  },
+  headers: {
+    Authorization: `Bearer ${identityToken}`,
+  },
+});
+
+if (response.data) {
+  console.log(response.data.choices[0].message.content);
+}
+```
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a pull request.
+
+## License
+
+[MIT](LICENSE)
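The `useMemory` section above describes retrieval (step 3), but neither the README nor this diff shows how stored embeddings are compared against a new prompt. A hedged sketch of the usual approach, assuming cosine similarity over the vectors kept in IndexedDB; the type and function names here are illustrative, not the SDK's:

```typescript
// Illustrative only: rank stored memories by cosine similarity to a query
// embedding. The SDK's real storage and scoring live behind useMemory and
// are not shown in this diff.
interface StoredMemory {
  text: string;
  embedding: number[];
}

function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

function rankMemories(
  queryEmbedding: number[],
  memories: StoredMemory[],
  topK = 5,
): StoredMemory[] {
  // Highest-similarity memories first; the top K become LLM context.
  return [...memories]
    .sort(
      (m1, m2) =>
        cosineSimilarity(queryEmbedding, m2.embedding) -
        cosineSimilarity(queryEmbedding, m1.embedding),
    )
    .slice(0, topK);
}
```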
package/dist/index.cjs
CHANGED

@@ -833,9 +833,10 @@ var createClient = (config = {}) => {
 };
 
 // src/clientConfig.ts
+var BASE_URL = "https://ai-portal-dev.zetachain.com";
 var createClientConfig = (config) => ({
   ...config,
-  baseUrl:
+  baseUrl: BASE_URL
 });
 
 // src/client/client.gen.ts
package/dist/index.mjs
CHANGED

@@ -804,9 +804,10 @@ var createClient = (config = {}) => {
 };
 
 // src/clientConfig.ts
+var BASE_URL = "https://ai-portal-dev.zetachain.com";
 var createClientConfig = (config) => ({
   ...config,
-  baseUrl:
+  baseUrl: BASE_URL
 });
 
 // src/client/client.gen.ts
package/dist/react/index.cjs
CHANGED

@@ -851,9 +851,10 @@ var createClient = (config = {}) => {
 };
 
 // src/clientConfig.ts
+var BASE_URL = "https://ai-portal-dev.zetachain.com";
 var createClientConfig = (config) => ({
   ...config,
-  baseUrl:
+  baseUrl: BASE_URL
 });
 
 // src/client/client.gen.ts

@@ -863,7 +864,7 @@ var client = createClient(createClientConfig(createConfig()));
 function useChat(options) {
   const {
     getToken,
-    baseUrl,
+    baseUrl = BASE_URL,
     onData: globalOnData,
     onFinish,
     onError

@@ -920,6 +921,7 @@ function useChat(options) {
       return { data: null, error: errorMsg };
     }
     const sseResult = await client.sse.post({
+      baseUrl,
       url: "/api/v1/chat/completions",
       body: {
         messages,

@@ -1410,7 +1412,7 @@ var generateEmbeddingForText = async (text, options = {}) => {
   const {
     model = "openai/text-embedding-3-small",
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   try {
     const token = getToken ? await getToken() : null;

@@ -1455,7 +1457,7 @@ var generateEmbeddingsForMemories = async (memories, options = {}) => {
   const {
     model = "openai/text-embedding-3-small",
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   const embeddings = /* @__PURE__ */ new Map();
   for (const memory of memories) {

@@ -1518,7 +1520,7 @@ function useMemory(options = {}) {
     generateEmbeddings = true,
     onFactsExtracted,
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   const extractionInProgressRef = (0, import_react3.useRef)(false);
   const extractMemoriesFromMessage = (0, import_react3.useCallback)(
package/dist/react/index.mjs
CHANGED

@@ -807,9 +807,10 @@ var createClient = (config = {}) => {
 };
 
 // src/clientConfig.ts
+var BASE_URL = "https://ai-portal-dev.zetachain.com";
 var createClientConfig = (config) => ({
   ...config,
-  baseUrl:
+  baseUrl: BASE_URL
 });
 
 // src/client/client.gen.ts

@@ -819,7 +820,7 @@ var client = createClient(createClientConfig(createConfig()));
 function useChat(options) {
   const {
     getToken,
-    baseUrl,
+    baseUrl = BASE_URL,
     onData: globalOnData,
     onFinish,
     onError

@@ -876,6 +877,7 @@ function useChat(options) {
       return { data: null, error: errorMsg };
     }
     const sseResult = await client.sse.post({
+      baseUrl,
       url: "/api/v1/chat/completions",
       body: {
         messages,

@@ -1366,7 +1368,7 @@ var generateEmbeddingForText = async (text, options = {}) => {
   const {
     model = "openai/text-embedding-3-small",
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   try {
     const token = getToken ? await getToken() : null;

@@ -1411,7 +1413,7 @@ var generateEmbeddingsForMemories = async (memories, options = {}) => {
   const {
     model = "openai/text-embedding-3-small",
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   const embeddings = /* @__PURE__ */ new Map();
   for (const memory of memories) {

@@ -1474,7 +1476,7 @@ function useMemory(options = {}) {
     generateEmbeddings = true,
     onFactsExtracted,
     getToken,
-    baseUrl
+    baseUrl = BASE_URL
   } = options;
   const extractionInProgressRef = useRef3(false);
   const extractMemoriesFromMessage = useCallback2(
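Net effect of the four `dist` changes: `useChat`, `useMemory`, `generateEmbeddingForText`, and `generateEmbeddingsForMemories` all fall back to the new `BASE_URL` constant when no `baseUrl` option is passed, and `useChat` now forwards the resolved `baseUrl` to its SSE request. A usage sketch built from the README's `useChat` example; the override URL is hypothetical:

```typescript
import { useIdentityToken } from "@privy-io/react-auth";
import { useChat } from "@reverbia/sdk/react";

const { identityToken } = useIdentityToken();

// Omitting `baseUrl` now targets https://ai-portal-dev.zetachain.com;
// passing one overrides the default, including for the streaming request.
const { sendMessage } = useChat({
  getToken: async () => identityToken || null,
  baseUrl: "https://my-gateway.example.com", // hypothetical override
});
```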