llmasaservice-client 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # llmasaservice-client
2
2
 
3
+ ## 0.4.0
4
+
5
+ ### Minor Changes
6
+
7
+ - Added the ability to disable cache for some calls on send
8
+
3
9
  ## 0.3.0
4
10
 
5
11
  ### Minor Changes
package/dist/index.js CHANGED
@@ -91,7 +91,7 @@ var useLLM = (options) => {
91
91
  setIdle(true);
92
92
  };
93
93
  function send(_0) {
94
- return __async(this, arguments, function* (prompt, messages = [], stream = true, abortController = new AbortController(), service = null, onComplete, onError) {
94
+ return __async(this, arguments, function* (prompt, messages = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
95
95
  var _a, _b, _c, _d, _e;
96
96
  setResponse("");
97
97
  setIdle(false);
@@ -101,8 +101,9 @@ var useLLM = (options) => {
101
101
  serviceId: service,
102
102
  prompt,
103
103
  messages,
104
- customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {}
104
+ customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {},
105
105
  // if no customer, use the projectId as the customer_id
106
+ allowCaching
106
107
  });
107
108
  const options2 = {
108
109
  method: "POST",
package/dist/index.mjs CHANGED
@@ -54,7 +54,7 @@ var useLLM = (options) => {
54
54
  setIdle(true);
55
55
  };
56
56
  function send(_0) {
57
- return __async(this, arguments, function* (prompt, messages = [], stream = true, abortController = new AbortController(), service = null, onComplete, onError) {
57
+ return __async(this, arguments, function* (prompt, messages = [], stream = true, allowCaching = true, service = null, abortController = new AbortController(), onComplete, onError) {
58
58
  var _a, _b, _c, _d, _e;
59
59
  setResponse("");
60
60
  setIdle(false);
@@ -64,8 +64,9 @@ var useLLM = (options) => {
64
64
  serviceId: service,
65
65
  prompt,
66
66
  messages,
67
- customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {}
67
+ customer: (_b = context == null ? void 0 : context.customer) != null ? _b : {},
68
68
  // if no customer, use the projectId as the customer_id
69
+ allowCaching
69
70
  });
70
71
  const options2 = {
71
72
  method: "POST",
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "llmasaservice-client",
3
3
  "license": "MIT",
4
- "version": "0.3.0",
4
+ "version": "0.4.0",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.mjs",
7
7
  "types": "dist/index.d.ts",
package/src/useLLM.ts CHANGED
@@ -44,8 +44,9 @@ export const useLLM = (options?: LLMServiceType): UseLLMReturnType => {
44
44
  * @param prompt The prompt to send to the LLM service.
45
45
  * @param messages The history and context messages to send to the LLM service, as an array of {role: string, content: string} objects. For example, [{ role: "system", content: "You are a useful assistant." }]
46
46
  * @param stream Determines whether to stream results back in the response property as they return from the service or batch them up and return them all at once in the response property as a string.
47
- * @param abortController The AbortController used to abort this request once it's started. This allows you to add a stop button to your UI.
47
+ * @param allowCaching Determines whether the service can use cached results or not.
48
48
  * @param service The service to use for the request. If null, load balancing will be applied. This is typically only used for testing.
49
+ * @param abortController The AbortController used to abort this request once it's started. This allows you to add a stop button to your UI.
49
50
  * @param onComplete The callback function to be called once the stream completes, with the final result string.
50
51
  * @param onError The callback function to be called if an error occurs, with the error string.
51
52
  * @returns a StreamReader object if stream is true, otherwise a string of the response. Typically this isn't used when streaming, the stream is exposed in the response property.
@@ -54,11 +55,11 @@ export const useLLM = (options?: LLMServiceType): UseLLMReturnType => {
54
55
  prompt: string,
55
56
  messages = [],
56
57
  stream: boolean = true,
57
- abortController: AbortController = new AbortController(),
58
+ allowCaching: boolean = true,
58
59
  service: string | null = null, // null means use the default service and apply services load balancing
60
+ abortController: AbortController = new AbortController(),
59
61
  onComplete?: (result: string) => void,
60
62
  onError?: (error: string) => void
61
-
62
63
  ): Promise<ReadableStreamDefaultReader<any> | string | undefined> {
63
64
  setResponse("");
64
65
  setIdle(false);
@@ -71,6 +72,7 @@ export const useLLM = (options?: LLMServiceType): UseLLMReturnType => {
71
72
  prompt: prompt,
72
73
  messages: messages,
73
74
  customer: context?.customer ?? {}, // if no customer, use the projectId as the customer_id
75
+ allowCaching: allowCaching,
74
76
  });
75
77
 
76
78
  // trying to get cloudfront oac going. posts need to be signed, but when i add this the call fails...