llmasaservice-client 0.4.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # llmasaservice-client
2
2
 
3
+ ## 0.5.0
4
+
5
+ ### Minor Changes
6
+
7
+ - Adding the ability to pass data to the chat service
8
+
3
9
  ## 0.4.0
4
10
 
5
11
  ### Minor Changes
package/dist/index.js CHANGED
@@ -91,7 +91,7 @@ var useLLM = (options) => {
91
91
  setIdle(true);
92
92
  };
93
93
  function send(_0) {
94
- return __async(this, arguments, function* (prompt, messages = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
94
+ return __async(this, arguments, function* (prompt, messages = [], data = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
95
95
  var _a, _b, _c, _d, _e;
96
96
  setResponse("");
97
97
  setIdle(false);
@@ -101,6 +101,7 @@ var useLLM = (options) => {
101
101
  serviceId: service,
102
102
  prompt,
103
103
  messages,
104
+ data,
104
105
  customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {},
105
106
  // if no customer, use the projectId as the customer_id
106
107
  allowCaching
package/dist/index.mjs CHANGED
@@ -54,7 +54,7 @@ var useLLM = (options) => {
54
54
  setIdle(true);
55
55
  };
56
56
  function send(_0) {
57
- return __async(this, arguments, function* (prompt, messages = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
57
+ return __async(this, arguments, function* (prompt, messages = [], data = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
58
58
  var _a, _b, _c, _d, _e;
59
59
  setResponse("");
60
60
  setIdle(false);
@@ -64,6 +64,7 @@ var useLLM = (options) => {
64
64
  serviceId: service,
65
65
  prompt,
66
66
  messages,
67
+ data,
67
68
  customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {},
68
69
  // if no customer, use the projectId as the customer_id
69
70
  allowCaching
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "llmasaservice-client",
3
3
  "license": "MIT",
4
- "version": "0.4.0",
4
+ "version": "0.5.0",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.mjs",
7
7
  "types": "dist/index.d.ts",
package/readme.md CHANGED
@@ -215,4 +215,59 @@ export default function Home() {
215
215
  );
216
216
  }
217
217
  ```
218
- ![Next / React Example](images/nextexample.png)
218
+ ![Next / React Example](images/nextexample.png)
219
+
220
+ # send method reference
221
+ The send method is the main entry point to call LLMs both streaming and synchronous. "prompt" is the only required field.
222
+
223
+ The full call signature is -
224
+
225
+ ```typescript
226
+ /**
227
+ * Calls the LLM as a service with the given prompt and messages. The response is returned in the response property of the hook.
228
+ *
229
+ * @param prompt The prompt to send to the LLM service.
230
+ * @param messages The history and context messages to send to the LLM service. as an array of {role: string, content: string} objects. for example, [{ role: "system", content: "You are a useful assistant." }]
+ * @param data The data to send to the LLM service, as an array of {key: string, data: string} objects. For example, [{ key: "name", data: "John" }]
231
+ * @param stream Determines whether to stream results back in the response property as they return from the service or batch them up and return them all at once in the response property as a string.
232
+ * @param allowCaching Determines whether the service can use cached results or not.
233
+ * @param service The service to use for the request. If null, load balancing will be applied. This is typically only used for testing.
234
+ * @param abortController The AbortController used to abort this request once it's started. This allows you to add a stop button to your UI.
235
+ * @param onComplete The callback function to be called once the stream completes, with the final result string.
236
+ * @param onError The callback function to be called if an error occurs, with the error string.
237
+ * @returns a StreamReader object if stream is true, otherwise a string of the response. Typically this isn't used when streaming, the stream is exposed in the response property.
238
+ */
239
+ async function send(
240
+ prompt: string,
241
+ messages = [],
+ data = [],
242
+ stream: boolean = true,
243
+ allowCaching: boolean = true,
244
+ service: string | null = null, // null means use the default service and apply services load balancing
245
+ abortController: AbortController = new AbortController(),
246
+ onComplete?: (result: string) => void,
247
+ onError?: (error: string) => void
248
+ ): Promise<ReadableStreamDefaultReader<any> | string | undefined> { ...
249
+ ```
250
+
251
+ Example:
252
+
253
+ Sends a streaming request that is progressively returned in the response property. On full completion, the response is saved to the ValueText state.
254
+
255
+ ```typescript
256
+ await send(
257
+ "What is 1+1=",
258
+ [{
259
+ role: "system",
260
+ content: "Answer all responses like a pirate"
261
+ }],
+ [], // data — no extra key/value data to send with this call
262
+ true, // stream (if this is false, the return type of send is a string)
263
+ false, // don't cache this call
264
+ null, // this uses the default services, not any specific group id or service id
265
+ new AbortController(), // this allows abort functionality in UI's
266
+ (response: string) => { // this function is called when a streaming response completes fully
267
+ setValueText(response);
268
+ },
269
+ (errorMessage: string) => { // this function is called if there are any errors during the streaming
270
+ console.error(errorMessage);
271
+ }
272
+ );
273
+ ```
package/src/useLLM.ts CHANGED
@@ -38,22 +38,24 @@ export const useLLM = (options?: LLMServiceType): UseLLMReturnType => {
38
38
  setIdle(true);
39
39
  };
40
40
 
41
- /**
42
- * Calls the LLM as a service with the given prompt and messages. The response is returned in the response property of the hook.
43
- *
44
- * @param prompt The prompt to send the the LLM service.
45
- * @param messages The history and context messages to send to the LLM service. as an array of {role: string, content: string} objects. for example, [{ role: "system", content: "You are a useful assistant." }]
46
- * @param stream Determines whether to stream results back in the response property as they return from the service or batch them up and return them all at once in the response property as a string.
47
- * @param allowCaching Determines whether the service can use cached results or not.
48
- * @param service The service to use for the request. If null, load balancing will be applied. This is typically only used for testing.
49
- * @param abortController The AbortController used to abort this request once its started. This allows you to add a stop button to your UI.
50
- * @param onComplete The callback function to be called once the stream completes, with the final result string.
51
- * @param onError The callback function to be called if an error occurs, with the error string.
52
- * @returns a StreamReader object if stream is true, otherwise a string of the response. Typically this isn't used when streaming, the stream is exposed in the response property.
53
- */
41
+ /**
42
+ * Calls the LLM as a service with the given prompt and messages. The response is returned in the response property of the hook.
43
+ *
44
+ * @param {string} prompt - The prompt to send to the LLM service.
45
+ * @param {Array<{role: string, content: string}>} messages - The history and context messages to send to the LLM service, as an array of {role: string, content: string} objects. For example, [{ role: "system", content: "You are a useful assistant." }]
46
+ * @param {Array<{key: string, data: string}>} data - The data to send to the LLM service, as an array of {key: string, data: string} objects. For example, [{ key: "name", data: "John" }]
47
+ * @param {boolean} stream - Determines whether to stream results back in the response property as they return from the service or batch them up and return them all at once in the response property as a string.
48
+ * @param {boolean} allowCaching - Determines whether the service can use cached results or not.
49
+ * @param {string | null} service - The service to use for the request. If null, load balancing will be applied. This is typically only used for testing.
50
+ * @param {AbortController} abortController - The AbortController used to abort this request once it's started. This allows you to add a stop button to your UI.
51
+ * @param {(result: string) => void} onComplete - The callback function to be called once the stream completes, with the final result string.
52
+ * @param {(error: string) => void} onError - The callback function to be called if an error occurs, with the error string.
53
+ * @returns {Promise<ReadableStreamDefaultReader<any> | string | undefined>} - A StreamReader object if stream is true, otherwise a string of the response. Typically this isn't used when streaming, the stream is exposed in the response property.
54
+ */
54
55
  async function send(
55
56
  prompt: string,
56
57
  messages = [],
58
+ data = [],
57
59
  stream: boolean = true,
58
60
  allowCaching: boolean = true,
59
61
  service: string | null = null, // null means use the default service and apply services load balancing
@@ -71,6 +73,7 @@ export const useLLM = (options?: LLMServiceType): UseLLMReturnType => {
71
73
  serviceId: service,
72
74
  prompt: prompt,
73
75
  messages: messages,
76
+ data: data,
74
77
  customer: context?.customer ?? {}, // if no customer, use the projectId as the customer_id
75
78
  allowCaching: allowCaching,
76
79
  });