@reverbia/sdk 1.0.0-next.20251125212314 → 1.0.0-next.20251127102727

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,45 +1,186 @@
1
- # AI SDK
2
-
3
- A TypeScript SDK for interacting with ZetaChain's AI Portal API. This SDK
4
- provides a type-safe, developer-friendly interface for building applications
5
- that leverage AI chat completion capabilities.
6
-
7
- ## Overview
8
-
9
- The AI SDK is an auto-generated TypeScript client library that wraps the
10
- ZetaChain AI Portal REST API. It enables seamless integration of AI-powered chat
11
- completion features into your applications with full TypeScript type safety and
12
- modern async/await patterns.
13
-
14
- ## Features
15
-
16
- - **Type-Safe API Client**: Fully typed interfaces generated from the OpenAPI
17
- specification, ensuring compile-time safety and excellent IDE autocomplete
18
- support
19
- - **Chat Completions**: Generate AI-powered chat responses using configurable
20
- models and conversation history
21
- - **Streaming Support**: Built-in support for streaming responses for real-time
22
- AI interactions
23
- - **Health Monitoring**: Check service health and status to ensure reliable API
24
- connectivity
25
- - **Flexible Configuration**: Customizable client instances with support for
26
- custom base URLs, authentication, and request/response interceptors
27
- - **Error Handling**: Configurable error handling with support for throwing
28
- errors or returning error objects
29
- - **Server-Sent Events**: Native support for SSE (Server-Sent Events) for
30
- real-time streaming capabilities
31
-
32
- ## Architecture
33
-
34
- The SDK is automatically generated from the OpenAPI specification using
35
- `@hey-api/openapi-ts`, ensuring it stays in sync with the latest API changes.
36
- The generated client provides a clean, promise-based API that follows modern
37
- JavaScript/TypeScript best practices.
38
-
39
- ## Use Cases
40
-
41
- - Building AI-powered chatbots and conversational interfaces
42
- - Integrating AI capabilities into web and mobile applications
43
- - Creating applications that require natural language processing
44
- - Developing tools that leverage large language models through ZetaChain's
45
- gateway
1
+ # @reverbia/sdk
2
+
3
+ A TypeScript SDK that empowers developers to build AI-powered applications. It
4
+ enables you to send prompts to LLMs with streaming support, manage long-term
5
+ memories, and encrypt sensitive data, all without needing your own LLM API key.
6
+
7
+ ## Installation
8
+
9
+ ```bash
10
+ pnpm install @reverbia/sdk@next
11
+ ```
12
+
13
+ > **Note:** Currently, the SDK is pre-release, so all new versions are released
14
+ > under the `next` tag (released on every merge to the `main` branch). Check out
15
+ > npm to see the latest version.
16
+
17
+ ## Configuration
18
+
19
+ To use the SDK, you'll need to configure your Privy provider and API URL.
20
+
21
+ ```env
22
+ PRIVY_APP_ID=cmhwlx82v000xle0cde4rjy5y
23
+ API_URL=https://ai-portal-dev.zetachain.com
24
+ ```
25
+
26
+ ## Authentication
27
+
28
+ The SDK currently only supports authentication via [Privy](https://privy.io) and
29
+ expects a Privy identity token.
30
+
31
+ ```typescript
32
+ import { useIdentityToken } from "@privy-io/react-auth";
33
+
34
+ const { identityToken } = useIdentityToken();
35
+ ```
36
+
37
+ ## Usage
38
+
39
+ For an example of how to use this functionality, check out [the example
40
+ repo](https://github.com/zeta-chain/ai-examples).
41
+
42
+ ### useChat
43
+
44
+ The `useChat` hook provides a convenient way to send chat messages to the LLM
45
+ API with automatic token management and loading state handling.
46
+
47
+ ```typescript
48
+ import { useChat } from "@reverbia/sdk/react";
49
+ ```
50
+
51
+ ```typescript
52
+ const { sendMessage, isLoading, stop } = useChat({
53
+ getToken: async () => identityToken || null,
54
+ onFinish: (response) => {
55
+ console.log("Chat finished:", response);
56
+ },
57
+ onError: (error) => {
58
+ console.error("Chat error:", error);
59
+ },
60
+ onData: (chunk) => {
61
+ console.log("Received chunk:", chunk);
62
+ },
63
+ });
64
+
65
+ const handleSend = async () => {
66
+ const result = await sendMessage({
67
+ messages: [{ role: "user", content: "Hello!" }],
68
+ model: "gpt-4o-mini",
69
+ });
70
+
71
+ if (result.error) {
72
+ console.error("Error:", result.error);
73
+ } else {
74
+ console.log("Response:", result.data);
75
+ }
76
+ };
77
+ ```
78
+
79
+ ### useMemory
80
+
81
+ The `useMemory` hook allows you to extract facts/memories from messages and
82
+ search through stored memories (in IndexedDB) using semantic search.
83
+
84
+ How it works:
85
+
86
+ 1. **Fact Extraction:** When prompts are sent to the LLM, they are analyzed for
87
+ relevant facts. If found, these facts are extracted and converted into vector
88
+ embeddings.
89
+ 2. **Storage:** Extracted memories and their embeddings are stored locally in
90
+ IndexedDB.
91
+ 3. **Retrieval:** New prompts are converted into embedding vectors and compared
92
+ against stored memories. Relevant memories are then retrieved and used as
93
+ context for the LLM interaction.
94
+
95
+ ```typescript
96
+ import { useMemory } from "@reverbia/sdk/react";
97
+ ```
98
+
99
+ ```typescript
100
+ const { extractMemoriesFromMessage, searchMemories } = useMemory({
101
+ getToken: async () => identityToken || null,
102
+ embeddingModel: "openai/text-embedding-3-small",
103
+ });
104
+
105
+ const handleExtract = async () => {
106
+ await extractMemoriesFromMessage({
107
+ messages: [
108
+ { role: "user", content: "My favorite color is blue" },
109
+ {
110
+ role: "assistant",
111
+ content: "I will remember that your favorite color is blue.",
112
+ },
113
+ ],
114
+ model: "gpt-4o",
115
+ });
116
+ };
117
+
118
+ const handleSearch = async () => {
119
+ const memories = await searchMemories("What is my favorite color?");
120
+ console.log(memories);
121
+ };
122
+ ```
123
+
124
+ ### useEncryption
125
+
126
+ The `useEncryption` hook and utilities help you encrypt and decrypt local data
127
+ using a key derived from a wallet signature (requires `@privy-io/react-auth`).
128
+
129
+ ```typescript
130
+ import { usePrivy } from "@privy-io/react-auth";
131
+ import { useEncryption, encryptData, decryptData } from "@reverbia/sdk/react";
132
+ ```
133
+
134
+ ```typescript
135
+ const { authenticated } = usePrivy();
136
+
137
+ // Initialize encryption (requests signature if key not present)
138
+ // Pass true when user is authenticated with wallet
139
+ useEncryption(authenticated);
140
+
141
+ // Encrypt data
142
+ const saveSecret = async (text: string) => {
143
+ const encrypted = await encryptData(text);
144
+ localStorage.setItem("secret", encrypted);
145
+ };
146
+
147
+ // Decrypt data
148
+ const loadSecret = async () => {
149
+ const encrypted = localStorage.getItem("secret");
150
+ if (encrypted) {
151
+ const decrypted = await decryptData(encrypted);
152
+ console.log(decrypted);
153
+ }
154
+ };
155
+ ```
156
+
157
+ ### Direct API Access
158
+
159
+ You can also call SDK functions directly without using the React
160
+ hooks.
161
+
162
+ ```typescript
163
+ import { postApiV1ChatCompletions } from "@reverbia/sdk";
164
+
165
+ const response = await postApiV1ChatCompletions({
166
+ body: {
167
+ messages: [{ role: "user", content: "Tell me a joke" }],
168
+ model: "gpt-4o-mini",
169
+ },
170
+ headers: {
171
+ Authorization: `Bearer ${identityToken}`,
172
+ },
173
+ });
174
+
175
+ if (response.data) {
176
+ console.log(response.data.choices[0].message.content);
177
+ }
178
+ ```
179
+
180
+ ## Contributing
181
+
182
+ Contributions are welcome! Please feel free to submit a pull request.
183
+
184
+ ## License
185
+
186
+ [MIT](LICENSE)
package/dist/index.d.mts CHANGED
@@ -74,6 +74,10 @@ type LlmapiChatCompletionUsage = {
74
74
  * CompletionTokens is the number of tokens in the completion
75
75
  */
76
76
  completion_tokens?: number;
77
+ /**
78
+ * CostMicroUSD is the cost of this completion in micro-dollars (USD × 1,000,000)
79
+ */
80
+ cost_micro_usd?: number;
77
81
  /**
78
82
  * PromptTokens is the number of tokens in the prompt
79
83
  */
@@ -171,6 +175,10 @@ type LlmapiEmbeddingResponse = {
171
175
  * Usage contains token usage information
172
176
  */
173
177
  type LlmapiEmbeddingUsage = {
178
+ /**
179
+ * CostMicroUSD is the inference cost for this embedding request
180
+ */
181
+ cost_micro_usd?: number;
174
182
  /**
175
183
  * PromptTokens is the number of tokens in the prompt
176
184
  */
package/dist/index.d.ts CHANGED
@@ -74,6 +74,10 @@ type LlmapiChatCompletionUsage = {
74
74
  * CompletionTokens is the number of tokens in the completion
75
75
  */
76
76
  completion_tokens?: number;
77
+ /**
78
+ * CostMicroUSD is the cost of this completion in micro-dollars (USD × 1,000,000)
79
+ */
80
+ cost_micro_usd?: number;
77
81
  /**
78
82
  * PromptTokens is the number of tokens in the prompt
79
83
  */
@@ -171,6 +175,10 @@ type LlmapiEmbeddingResponse = {
171
175
  * Usage contains token usage information
172
176
  */
173
177
  type LlmapiEmbeddingUsage = {
178
+ /**
179
+ * CostMicroUSD is the inference cost for this embedding request
180
+ */
181
+ cost_micro_usd?: number;
174
182
  /**
175
183
  * PromptTokens is the number of tokens in the prompt
176
184
  */
@@ -43,6 +43,10 @@ type LlmapiChatCompletionUsage = {
43
43
  * CompletionTokens is the number of tokens in the completion
44
44
  */
45
45
  completion_tokens?: number;
46
+ /**
47
+ * CostMicroUSD is the cost of this completion in micro-dollars (USD × 1,000,000)
48
+ */
49
+ cost_micro_usd?: number;
46
50
  /**
47
51
  * PromptTokens is the number of tokens in the prompt
48
52
  */
@@ -43,6 +43,10 @@ type LlmapiChatCompletionUsage = {
43
43
  * CompletionTokens is the number of tokens in the completion
44
44
  */
45
45
  completion_tokens?: number;
46
+ /**
47
+ * CostMicroUSD is the cost of this completion in micro-dollars (USD × 1,000,000)
48
+ */
49
+ cost_micro_usd?: number;
46
50
  /**
47
51
  * PromptTokens is the number of tokens in the prompt
48
52
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@reverbia/sdk",
3
- "version": "1.0.0-next.20251125212314",
3
+ "version": "1.0.0-next.20251127102727",
4
4
  "description": "",
5
5
  "main": "./dist/index.cjs",
6
6
  "module": "./dist/index.mjs",
@@ -49,7 +49,7 @@
49
49
  },
50
50
  "homepage": "https://github.com/zeta-chain/ai-sdk#readme",
51
51
  "dependencies": {
52
- "@reverbia/portal": "1.0.0-next.20251120153026",
52
+ "@reverbia/portal": "1.0.0-next.20251126175613",
53
53
  "@reverbia/sdk": "1.0.0-next.20251120124145",
54
54
  "ai": "5.0.93"
55
55
  },