@reverbia/sdk 1.0.0-next.20251119170952 → 1.0.0-next.20251120124145

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,139 @@
+/**
+ * ExtraFields contains additional metadata
+ */
+type LlmapiChatCompletionExtraFields = {
+    /**
+     * Latency is the request latency in milliseconds
+     */
+    latency?: number;
+    /**
+     * ModelRequested is the model that was requested
+     */
+    model_requested?: string;
+    /**
+     * Provider is the LLM provider used (e.g., "openai", "anthropic")
+     */
+    provider?: string;
+    /**
+     * RequestType is always "chat_completion"
+     */
+    request_type?: string;
+};
+type LlmapiChatCompletionResponse = {
+    /**
+     * Choices contains the completion choices
+     */
+    choices?: Array<LlmapiChoice>;
+    extra_fields?: LlmapiChatCompletionExtraFields;
+    /**
+     * ID is the completion ID
+     */
+    id?: string;
+    /**
+     * Model is the model used
+     */
+    model?: string;
+    usage?: LlmapiChatCompletionUsage;
+};
+/**
+ * Usage contains token usage information
+ */
+type LlmapiChatCompletionUsage = {
+    /**
+     * CompletionTokens is the number of tokens in the completion
+     */
+    completion_tokens?: number;
+    /**
+     * PromptTokens is the number of tokens in the prompt
+     */
+    prompt_tokens?: number;
+    /**
+     * TotalTokens is the total number of tokens used
+     */
+    total_tokens?: number;
+};
+type LlmapiChoice = {
+    /**
+     * FinishReason indicates why the completion stopped
+     */
+    finish_reason?: string;
+    /**
+     * Index is the choice index
+     */
+    index?: number;
+    message?: LlmapiMessage;
+};
+/**
+ * Message is the generated message
+ */
+type LlmapiMessage = {
+    /**
+     * Content is the message content
+     */
+    content?: string;
+    role?: LlmapiRole;
+};
+/**
+ * Role is the message role (system, user, assistant)
+ */
+type LlmapiRole = string;
+
+type SendMessageArgs = {
+    messages: LlmapiMessage[];
+    model: string;
+};
+type SendMessageResult = {
+    data: LlmapiChatCompletionResponse;
+    error: null;
+} | {
+    data: null;
+    error: string;
+};
+type UseChatOptions = {
+    getToken?: () => Promise<string | null>;
+};
+type UseChatResult = {
+    isLoading: boolean;
+    sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
+};
+/**
+ * A React hook for managing chat completions with authentication.
+ *
+ * This hook provides a convenient way to send chat messages to the LLM API
+ * with automatic token management and loading state handling.
+ *
+ * @param options - Optional configuration object
+ * @param options.getToken - An async function that returns an authentication token.
+ *   This token will be used as a Bearer token in the Authorization header.
+ *   If not provided, `sendMessage` will return an error.
+ *
+ * @returns An object containing:
+ * - `isLoading`: A boolean indicating whether a request is currently in progress
+ * - `sendMessage`: An async function to send chat messages
+ *
+ * @example
+ * ```tsx
+ * const { isLoading, sendMessage } = useChat({
+ *   getToken: async () => {
+ *     // Get your auth token from your auth provider
+ *     return await getAuthToken();
+ *   }
+ * });
+ *
+ * const handleSend = async () => {
+ *   const result = await sendMessage({
+ *     messages: [{ role: 'user', content: 'Hello!' }],
+ *     model: 'gpt-4o-mini'
+ *   });
+ *
+ *   if (result.error) {
+ *     console.error(result.error);
+ *   } else {
+ *     console.log(result.data);
+ *   }
+ * };
+ * ```
+ */
+declare function useChat(options?: UseChatOptions): UseChatResult;
+
+export { useChat };
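
The new declarations make `SendMessageResult` a discriminated union, so checking `error` against `null` narrows `data` to a non-null `LlmapiChatCompletionResponse` on the success branch. A minimal consumer sketch, assuming the `./react` subpath introduced in the package.json change below and a hypothetical `fetchAuthToken` helper:

    import { useChat } from "@reverbia/sdk/react";

    // Hypothetical helper; substitute your own auth provider.
    declare function fetchAuthToken(): Promise<string | null>;

    export function ChatButton() {
      const { isLoading, sendMessage } = useChat({ getToken: fetchAuthToken });

      const handleSend = async () => {
        const result = await sendMessage({
          messages: [{ role: "user", content: "Hello!" }],
          model: "gpt-4o-mini",
        });
        if (result.error !== null) {
          console.error(result.error); // failure branch: data is null
          return;
        }
        // success branch: data is LlmapiChatCompletionResponse
        console.log(result.data.choices?.[0]?.message?.content);
      };

      return <button disabled={isLoading} onClick={handleSend}>Send</button>;
    }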
@@ -0,0 +1,65 @@
+// src/react/useChat.ts
+import { useCallback, useState } from "react";
+import {
+  postApiV1ChatCompletions
+} from "@reverbia/sdk";
+function useChat(options) {
+  const { getToken } = options || {};
+  const [isLoading, setIsLoading] = useState(false);
+  const sendMessage = useCallback(
+    async ({
+      messages,
+      model
+    }) => {
+      if (!messages?.length) {
+        const error = "messages are required to call sendMessage.";
+        return { data: null, error };
+      }
+      if (!model) {
+        const error = "model is required to call sendMessage.";
+        return { data: null, error };
+      }
+      if (!getToken) {
+        const error = "Token getter function is required.";
+        return { data: null, error };
+      }
+      setIsLoading(true);
+      try {
+        const token = await getToken();
+        if (!token) {
+          const error = "No access token available.";
+          setIsLoading(false);
+          return { data: null, error };
+        }
+        const completion = await postApiV1ChatCompletions({
+          body: {
+            messages,
+            model
+          },
+          headers: {
+            Authorization: `Bearer ${token}`
+          }
+        });
+        if (!completion.data) {
+          const error = completion.error?.error ?? "API did not return a completion response.";
+          setIsLoading(false);
+          return { data: null, error };
+        }
+        setIsLoading(false);
+        return { data: completion.data, error: null };
+      } catch (err) {
+        const error = err instanceof Error ? err.message : "Failed to send message.";
+        setIsLoading(false);
+        return { data: null, error };
+      }
+    },
+    [getToken]
+  );
+  return {
+    isLoading,
+    sendMessage
+  };
+}
+export {
+  useChat
+};
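
Two implementation details worth noting: failures are returned as `{ data: null, error }` values rather than thrown, and `isLoading` is reset on every exit path, so callers never need a try/catch around `sendMessage`. The `getToken` option only has to satisfy `() => Promise<string | null>`; a sketch against a hypothetical auth client (the `auth-client` module and `getAccessToken` are assumptions, not part of this package):

    import { authClient } from "./auth-client"; // hypothetical token source

    export const getToken = async (): Promise<string | null> => {
      try {
        return await authClient.getAccessToken();
      } catch {
        // Resolving to null makes sendMessage fail fast with
        // "No access token available." instead of throwing.
        return null;
      }
    };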
@@ -18,13 +18,13 @@ var __copyProps = (to, from, except, desc) => {
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
 // src/vercel/index.ts
-var vercel_exports = {};
-__export(vercel_exports, {
+var index_exports = {};
+__export(index_exports, {
   createAssistantStream: () => createAssistantStream,
   createErrorStream: () => createErrorStream,
   mapMessagesToCompletionPayload: () => mapMessagesToCompletionPayload
 });
-module.exports = __toCommonJS(vercel_exports);
+module.exports = __toCommonJS(index_exports);
 
 // src/vercel/messages.ts
 function mapMessagesToCompletionPayload(messages) {
@@ -1,5 +1,19 @@
 import { UIMessage } from 'ai';
-import { t as LlmapiMessage } from '../types.gen-Ar4CxyYC.mjs';
+
+/**
+ * Message is the generated message
+ */
+type LlmapiMessage = {
+    /**
+     * Content is the message content
+     */
+    content?: string;
+    role?: LlmapiRole;
+};
+/**
+ * Role is the message role (system, user, assistant)
+ */
+type LlmapiRole = string;
 
 /**
  * Converts an array of Vercel AI {@link UIMessage} objects into the
@@ -1,5 +1,19 @@
 import { UIMessage } from 'ai';
-import { t as LlmapiMessage } from '../types.gen-Ar4CxyYC.js';
+
+/**
+ * Message is the generated message
+ */
+type LlmapiMessage = {
+    /**
+     * Content is the message content
+     */
+    content?: string;
+    role?: LlmapiRole;
+};
+/**
+ * Role is the message role (system, user, assistant)
+ */
+type LlmapiRole = string;
 
 /**
  * Converts an array of Vercel AI {@link UIMessage} objects into the
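
Both Vercel declaration files now inline `LlmapiMessage` and `LlmapiRole` instead of importing them from the shared `types.gen-Ar4CxyYC` chunk. Since TypeScript types are structural, the duplicated declarations stay interchangeable across entry points; a sketch, assuming `mapMessagesToCompletionPayload` returns the completion message payload its doc comment describes:

    import type { UIMessage } from "ai";
    import { mapMessagesToCompletionPayload } from "@reverbia/sdk/vercel";

    const uiMessages: UIMessage[] = []; // e.g. chat state from the Vercel AI SDK
    // Structurally a LlmapiMessage[], so it can be passed straight to
    // sendMessage from @reverbia/sdk/react without casts.
    const payload = mapMessagesToCompletionPayload(uiMessages);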
package/package.json CHANGED
@@ -1,19 +1,27 @@
 {
   "name": "@reverbia/sdk",
-  "version": "1.0.0-next.20251119170952",
+  "version": "1.0.0-next.20251120124145",
   "description": "",
-  "main": "./dist/client/index.cjs",
-  "module": "./dist/client/index.mjs",
-  "types": "./dist/client/index.d.ts",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.mjs",
+  "types": "./dist/index.d.ts",
   "exports": {
     ".": {
-      "types": "./dist/client/index.d.ts",
-      "import": "./dist/client/index.mjs",
-      "require": "./dist/client/index.cjs",
-      "default": "./dist/client/index.cjs"
+      "types": "./dist/index.d.ts",
+      "react-server": "./dist/index.mjs",
+      "import": "./dist/index.mjs",
+      "require": "./dist/index.cjs",
+      "default": "./dist/index.cjs"
+    },
+    "./react": {
+      "types": "./dist/react/index.d.ts",
+      "import": "./dist/react/index.mjs",
+      "require": "./dist/react/index.cjs",
+      "default": "./dist/react/index.cjs"
     },
     "./vercel": {
       "types": "./dist/vercel/index.d.ts",
+      "react-server": "./dist/vercel/index.mjs",
       "import": "./dist/vercel/index.mjs",
       "require": "./dist/vercel/index.cjs",
       "default": "./dist/vercel/index.cjs"
@@ -46,10 +54,14 @@
   },
   "devDependencies": {
     "@hey-api/openapi-ts": "0.87.2",
+    "@types/react": "^19.2.6",
     "tsup": "^8.5.1",
     "typedoc": "^0.28.14",
     "typedoc-plugin-frontmatter": "^1.3.0",
     "typedoc-plugin-markdown": "^4.9.0",
     "typescript": "^5.9.3"
+  },
+  "peerDependencies": {
+    "react": "^18.0.0 || ^19.0.0"
   }
 }
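
Taken together, the package.json changes move the core client from `dist/client` to the package root, add a dedicated `./react` subpath for the hook, declare `react` ^18 or ^19 as a peer dependency, and add a `react-server` condition so React Server Components environments resolve the ESM build. Under the new exports map, imports would resolve roughly as follows:

    import { postApiV1ChatCompletions } from "@reverbia/sdk";              // "."        -> ./dist/index.*
    import { useChat } from "@reverbia/sdk/react";                         // "./react"  -> ./dist/react/index.*
    import { mapMessagesToCompletionPayload } from "@reverbia/sdk/vercel"; // "./vercel" -> ./dist/vercel/index.*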