@upstash/qstash 2.6.0 → 2.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (6)
  1. package/README.md +7 -9
  2. package/index.d.mts +191 -64
  3. package/index.d.ts +191 -64
  4. package/index.js +206 -146
  5. package/index.mjs +201 -141
  6. package/package.json +1 -1
package/index.js CHANGED
@@ -13,10 +13,18 @@ var DLQ = class {
13
13
  * List messages in the dlq
14
14
  */
15
15
  async listMessages(options) {
16
+ const filterPayload = {
17
+ ..._optionalChain([options, 'optionalAccess', _ => _.filter]),
18
+ topicName: _optionalChain([options, 'optionalAccess', _2 => _2.filter, 'optionalAccess', _3 => _3.urlGroup])
19
+ };
16
20
  const messagesPayload = await this.http.request({
17
21
  method: "GET",
18
22
  path: ["v2", "dlq"],
19
- query: { cursor: _optionalChain([options, 'optionalAccess', _ => _.cursor]) }
23
+ query: {
24
+ cursor: _optionalChain([options, 'optionalAccess', _4 => _4.cursor]),
25
+ count: _optionalChain([options, 'optionalAccess', _5 => _5.count]),
26
+ ...filterPayload
27
+ }
20
28
  });
21
29
  return {
22
30
  messages: messagesPayload.messages.map((message) => {
@@ -84,8 +92,8 @@ var HttpClient = (_class = class {
84
92
  attempts: 1,
85
93
  backoff: () => 0
86
94
  } : {
87
- attempts: _optionalChain([config, 'access', _2 => _2.retry, 'optionalAccess', _3 => _3.retries]) ? config.retry.retries + 1 : 5,
88
- backoff: _nullishCoalesce(_optionalChain([config, 'access', _4 => _4.retry, 'optionalAccess', _5 => _5.backoff]), () => ( ((retryCount) => Math.exp(retryCount) * 50)))
95
+ attempts: _optionalChain([config, 'access', _6 => _6.retry, 'optionalAccess', _7 => _7.retries]) ? config.retry.retries + 1 : 5,
96
+ backoff: _nullishCoalesce(_optionalChain([config, 'access', _8 => _8.retry, 'optionalAccess', _9 => _9.backoff]), () => ( ((retryCount) => Math.exp(retryCount) * 50)))
89
97
  };
90
98
  }
91
99
  async request(request) {
@@ -193,6 +201,133 @@ var HttpClient = (_class = class {
193
201
  }
194
202
  }, _class);
195
203
 
204
+ // src/client/llm/chat.ts
205
+ var Chat = (_class2 = class _Chat {
206
+
207
+
208
+ constructor(http, token) {;_class2.prototype.__init3.call(this);_class2.prototype.__init4.call(this);_class2.prototype.__init5.call(this);
209
+ this.http = http;
210
+ this.token = token;
211
+ }
212
+ static toChatRequest(request) {
213
+ const messages = [];
214
+ messages.push(
215
+ { role: "system", content: request.system },
216
+ { role: "user", content: request.user }
217
+ );
218
+ const chatRequest = { ...request, messages };
219
+ return chatRequest;
220
+ }
221
+ /**
222
+ * Calls the Upstash completions api given a ChatRequest.
223
+ *
224
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
225
+ * if stream is enabled.
226
+ *
227
+ * @param request ChatRequest with messages
228
+ * @returns Chat completion or stream
229
+ */
230
+ __init3() {this.create = async (request) => {
231
+ if (request.provider.owner != "upstash")
232
+ return this.createThirdParty(request);
233
+ const body = JSON.stringify(request);
234
+ if ("stream" in request && request.stream) {
235
+ return this.http.requestStream({
236
+ path: ["llm", "v1", "chat", "completions"],
237
+ method: "POST",
238
+ headers: {
239
+ "Content-Type": "application/json",
240
+ Connection: "keep-alive",
241
+ Accept: "text/event-stream",
242
+ "Cache-Control": "no-cache",
243
+ Authorization: `Bearer ${this.token}`
244
+ },
245
+ body
246
+ });
247
+ }
248
+ return this.http.request({
249
+ path: ["llm", "v1", "chat", "completions"],
250
+ method: "POST",
251
+ headers: { "Content-Type": "application/json", Authorization: `Bearer ${this.token}` },
252
+ body
253
+ });
254
+ }}
255
+ /**
256
+ * Calls the Upstash completions api given a ChatRequest.
257
+ *
258
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
259
+ * if stream is enabled.
260
+ *
261
+ * @param request ChatRequest with messages
262
+ * @returns Chat completion or stream
263
+ */
264
+ __init4() {this.createThirdParty = async (request) => {
265
+ const { baseUrl, token, owner } = request.provider;
266
+ if (owner === "upstash")
267
+ throw new Error("Upstash is not 3rd party provider!");
268
+ delete request.provider;
269
+ delete request.system;
270
+ const body = JSON.stringify(request);
271
+ if ("stream" in request && request.stream) {
272
+ return this.http.requestStream({
273
+ path: ["v1", "chat", "completions"],
274
+ method: "POST",
275
+ headers: {
276
+ "Content-Type": "application/json",
277
+ Connection: "keep-alive",
278
+ Accept: "text/event-stream",
279
+ "Cache-Control": "no-cache",
280
+ Authorization: `Bearer ${token}`
281
+ },
282
+ body,
283
+ baseUrl
284
+ });
285
+ }
286
+ return this.http.request({
287
+ path: ["v1", "chat", "completions"],
288
+ method: "POST",
289
+ headers: {
290
+ "Content-Type": "application/json",
291
+ Authorization: `Bearer ${token}`
292
+ },
293
+ body,
294
+ baseUrl
295
+ });
296
+ }}
297
+ /**
298
+ * Calls the Upstash completions api given a PromptRequest.
299
+ *
300
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
301
+ * if stream is enabled.
302
+ *
303
+ * @param request PromptRequest with system and user messages.
304
+ * Note that system parameter shouldn't be passed in the case of
305
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
306
+ * @returns Chat completion or stream
307
+ */
308
+ __init5() {this.prompt = async (request) => {
309
+ const chatRequest = _Chat.toChatRequest(request);
310
+ return this.create(chatRequest);
311
+ }}
312
+ }, _class2);
313
+
314
+ // src/client/llm/utils.ts
315
+ function appendLLMOptionsIfNeeded(request, headers) {
316
+ if (_optionalChain([request, 'access', _10 => _10.api, 'optionalAccess', _11 => _11.provider, 'optionalAccess', _12 => _12.owner]) === "upstash") {
317
+ request.api = { name: "llm" };
318
+ return;
319
+ }
320
+ if (request.api && "provider" in request.api) {
321
+ const provider = request.api.provider;
322
+ if (!_optionalChain([provider, 'optionalAccess', _13 => _13.baseUrl]))
323
+ throw new Error("baseUrl cannot be empty or undefined!");
324
+ if (!provider.token)
325
+ throw new Error("token cannot be empty or undefined!");
326
+ request.url = `${provider.baseUrl}/v1/chat/completions`;
327
+ headers.set("Authorization", `Bearer ${provider.token}`);
328
+ }
329
+ }
330
+
196
331
  // src/client/messages.ts
197
332
  var Messages = class {
198
333
 
@@ -223,30 +358,23 @@ var Messages = class {
223
358
  parseResponseAsJson: false
224
359
  });
225
360
  }
226
- };
227
-
228
- // src/client/llm/constants.ts
229
- var PROVIDER_MAP = {
230
- openai: "https://api.openai.com",
231
- togetherai: "https://api.together.xyz"
232
- };
233
-
234
- // src/client/llm/utils.ts
235
- function appendLLMOptions(request, headers) {
236
- if ("llmProvider" in request) {
237
- const llmProvider = request.llmProvider;
238
- if (llmProvider === "openai") {
239
- const token = _nullishCoalesce(process.env.OPENAI_API_KEY, () => ( request.llmToken));
240
- request.url = _nullishCoalesce(request.url, () => ( `${PROVIDER_MAP[llmProvider]}/v1/chat/completion`));
241
- headers.set("Authorization", `Bearer ${token}`);
242
- }
243
- if (llmProvider === "togetherai") {
244
- const token = _nullishCoalesce(process.env.TOGETHER_API_KEY, () => ( request.llmToken));
245
- request.url = _nullishCoalesce(request.url, () => ( `${PROVIDER_MAP[llmProvider]}/v1/chat/completion`));
246
- headers.set("Authorization", `Bearer ${token}`);
247
- }
361
+ async deleteMany(messageIds) {
362
+ const result = await this.http.request({
363
+ method: "DELETE",
364
+ path: ["v2", "messages"],
365
+ headers: { "Content-Type": "application/json" },
366
+ body: JSON.stringify({ messageIds })
367
+ });
368
+ return result.cancelled;
248
369
  }
249
- }
370
+ async deleteAll() {
371
+ const result = await this.http.request({
372
+ method: "DELETE",
373
+ path: ["v2", "messages"]
374
+ });
375
+ return result.cancelled;
376
+ }
377
+ };
250
378
 
251
379
  // src/client/utils.ts
252
380
  var isIgnoredHeader = (header) => {
@@ -294,7 +422,7 @@ function processHeaders(request) {
294
422
  return headers;
295
423
  }
296
424
  function getRequestPath(request) {
297
- return _nullishCoalesce(_nullishCoalesce(request.url, () => ( request.urlGroup)), () => ( `api/${request.api}`));
425
+ return _nullishCoalesce(_nullishCoalesce(_nullishCoalesce(request.url, () => ( request.urlGroup)), () => ( request.topic)), () => ( `api/${_optionalChain([request, 'access', _14 => _14.api, 'optionalAccess', _15 => _15.name])}`));
298
426
  }
299
427
 
300
428
  // src/client/queue.ts
@@ -384,7 +512,7 @@ var Queue = class {
384
512
  async enqueueJSON(request) {
385
513
  const headers = prefixHeaders(new Headers(request.headers));
386
514
  headers.set("Content-Type", "application/json");
387
- appendLLMOptions(request, headers);
515
+ appendLLMOptionsIfNeeded(request, headers);
388
516
  const response = await this.enqueue({
389
517
  ...request,
390
518
  body: JSON.stringify(request.body),
@@ -576,125 +704,17 @@ var UrlGroups = class {
576
704
  }
577
705
  };
578
706
 
579
- // src/client/llm/chat.ts
580
- var Chat = (_class2 = class _Chat {
581
-
582
- constructor(http) {;_class2.prototype.__init3.call(this);_class2.prototype.__init4.call(this);_class2.prototype.__init5.call(this);
583
- this.http = http;
584
- }
585
- static toChatRequest(request) {
586
- const messages = [];
587
- messages.push(
588
- { role: "system", content: request.system },
589
- { role: "user", content: request.user }
590
- );
591
- const chatRequest = { ...request, messages };
592
- return chatRequest;
593
- }
594
- /**
595
- * Calls the Upstash completions api given a ChatRequest.
596
- *
597
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
598
- * if stream is enabled.
599
- *
600
- * @param request ChatRequest with messages
601
- * @returns Chat completion or stream
602
- */
603
- __init3() {this.create = async (request) => {
604
- if (request.provider === "openai" || request.provider === "togetherai")
605
- return this.createThirdParty(request);
606
- const body = JSON.stringify(request);
607
- if ("stream" in request && request.stream) {
608
- return this.http.requestStream({
609
- path: ["llm", "v1", "chat", "completions"],
610
- method: "POST",
611
- headers: {
612
- "Content-Type": "application/json",
613
- Connection: "keep-alive",
614
- Accept: "text/event-stream",
615
- "Cache-Control": "no-cache"
616
- },
617
- body
618
- });
619
- }
620
- return this.http.request({
621
- path: ["llm", "v1", "chat", "completions"],
622
- method: "POST",
623
- headers: { "Content-Type": "application/json" },
624
- body
625
- });
626
- }}
627
- /**
628
- * Calls the Upstash completions api given a ChatRequest.
629
- *
630
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
631
- * if stream is enabled.
632
- *
633
- * @param request ChatRequest with messages
634
- * @returns Chat completion or stream
635
- */
636
- __init4() {this.createThirdParty = async (request) => {
637
- if (request.provider === "openai" || request.provider === "togetherai") {
638
- const baseUrl = PROVIDER_MAP[request.provider];
639
- const llmToken = request.llmToken;
640
- delete request.llmToken;
641
- delete request.system;
642
- delete request.provider;
643
- const body = JSON.stringify(request);
644
- if ("stream" in request && request.stream) {
645
- return this.http.requestStream({
646
- path: ["v1", "chat", "completions"],
647
- method: "POST",
648
- headers: {
649
- "Content-Type": "application/json",
650
- Connection: "keep-alive",
651
- Accept: "text/event-stream",
652
- "Cache-Control": "no-cache",
653
- Authorization: `Bearer ${llmToken}`
654
- },
655
- body,
656
- baseUrl
657
- });
658
- }
659
- return this.http.request({
660
- path: ["v1", "chat", "completions"],
661
- method: "POST",
662
- headers: {
663
- "Content-Type": "application/json",
664
- Authorization: `Bearer ${llmToken}`
665
- },
666
- body,
667
- baseUrl
668
- });
669
- }
670
- throw new Error("Could not find any third party provider");
671
- }}
672
- /**
673
- * Calls the Upstash completions api given a PromptRequest.
674
- *
675
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
676
- * if stream is enabled.
677
- *
678
- * @param request PromptRequest with system and user messages.
679
- * Note that system parameter shouldn't be passed in the case of
680
- * mistralai/Mistral-7B-Instruct-v0.2 model.
681
- * @returns Chat completion or stream
682
- */
683
- __init5() {this.prompt = async (request) => {
684
- const chatRequest = _Chat.toChatRequest(request);
685
- return this.create(chatRequest);
686
- }}
687
- }, _class2);
688
-
689
707
  // src/client/client.ts
690
708
  var Client = class {
691
709
 
710
+
692
711
  constructor(config) {
693
712
  this.http = new HttpClient({
694
713
  retry: config.retry,
695
714
  baseUrl: config.baseUrl ? config.baseUrl.replace(/\/$/, "") : "https://qstash.upstash.io",
696
715
  authorization: `Bearer ${config.token}`
697
716
  });
717
+ this.token = config.token;
698
718
  }
699
719
  /**
700
720
  * Access the urlGroup API.
@@ -704,6 +724,16 @@ var Client = class {
704
724
  get urlGroups() {
705
725
  return new UrlGroups(this.http);
706
726
  }
727
+ /**
728
+ * Deprecated. Use urlGroups instead.
729
+ *
730
+ * Access the topic API.
731
+ *
732
+ * Create, read, update or delete topics.
733
+ */
734
+ get topics() {
735
+ return this.urlGroups;
736
+ }
707
737
  /**
708
738
  * Access the dlq API.
709
739
  *
@@ -734,7 +764,7 @@ var Client = class {
734
764
  * Create, read, update or delete queues.
735
765
  */
736
766
  queue(request) {
737
- return new Queue(this.http, _optionalChain([request, 'optionalAccess', _6 => _6.queueName]));
767
+ return new Queue(this.http, _optionalChain([request, 'optionalAccess', _16 => _16.queueName]));
738
768
  }
739
769
  /**
740
770
  * Access the Chat API
@@ -742,7 +772,7 @@ var Client = class {
742
772
  * Call the create or prompt methods
743
773
  */
744
774
  chat() {
745
- return new Chat(this.http);
775
+ return new Chat(this.http, this.token);
746
776
  }
747
777
  async publish(request) {
748
778
  const headers = processHeaders(request);
@@ -761,7 +791,7 @@ var Client = class {
761
791
  async publishJSON(request) {
762
792
  const headers = prefixHeaders(new Headers(request.headers));
763
793
  headers.set("Content-Type", "application/json");
764
- appendLLMOptions(request, headers);
794
+ appendLLMOptionsIfNeeded(request, headers);
765
795
  const response = await this.publish({
766
796
  ...request,
767
797
  headers,
@@ -803,7 +833,7 @@ var Client = class {
803
833
  message.body = JSON.stringify(message.body);
804
834
  }
805
835
  message.headers = new Headers(message.headers);
806
- appendLLMOptions(message, message.headers);
836
+ appendLLMOptionsIfNeeded(message, message.headers);
807
837
  message.headers.set("Content-Type", "application/json");
808
838
  }
809
839
  const response = await this.batch(request);
@@ -830,14 +860,16 @@ var Client = class {
830
860
  */
831
861
  async events(request) {
832
862
  const query = {};
833
- if (_optionalChain([request, 'optionalAccess', _7 => _7.cursor]) && request.cursor > 0) {
863
+ if (_optionalChain([request, 'optionalAccess', _17 => _17.cursor]) && request.cursor > 0) {
834
864
  query.cursor = request.cursor.toString();
835
865
  }
836
- for (const [key, value] of Object.entries(_nullishCoalesce(_optionalChain([request, 'optionalAccess', _8 => _8.filter]), () => ( {})))) {
866
+ for (const [key, value] of Object.entries(_nullishCoalesce(_optionalChain([request, 'optionalAccess', _18 => _18.filter]), () => ( {})))) {
837
867
  if (typeof value === "number" && value < 0) {
838
868
  continue;
839
869
  }
840
- if (typeof value !== "undefined") {
870
+ if (key === "urlGroup") {
871
+ query.topicName = value.toString();
872
+ } else if (typeof value !== "undefined") {
841
873
  query[key] = value.toString();
842
874
  }
843
875
  }
@@ -858,6 +890,34 @@ var Client = class {
858
890
  }
859
891
  };
860
892
 
893
+ // src/client/llm/providers.ts
894
+ var upstash = () => {
895
+ return {
896
+ owner: "upstash",
897
+ baseUrl: "https://qstash.upstash.io/llm",
898
+ token: ""
899
+ };
900
+ };
901
+ var openai = ({
902
+ token
903
+ }) => {
904
+ return { token, owner: "openai", baseUrl: "https://api.openai.com" };
905
+ };
906
+ var custom = ({
907
+ baseUrl,
908
+ token
909
+ }) => {
910
+ const trimmedBaseUrl = baseUrl.replace(/\/(v1\/)?chat\/completions$/, "");
911
+ return {
912
+ token,
913
+ owner: "custom",
914
+ baseUrl: trimmedBaseUrl
915
+ };
916
+ };
917
+
918
+
919
+
920
+
861
921
 
862
922
 
863
923
 
@@ -868,4 +928,4 @@ var Client = class {
868
928
 
869
929
 
870
930
 
871
- exports.Chat = Chat; exports.Client = Client; exports.Messages = Messages; exports.QstashChatRatelimitError = QstashChatRatelimitError; exports.QstashError = QstashError; exports.QstashRatelimitError = QstashRatelimitError; exports.Receiver = _chunkUUR7N6E6js.Receiver; exports.Schedules = Schedules; exports.SignatureError = _chunkUUR7N6E6js.SignatureError; exports.UrlGroups = UrlGroups;
931
+ exports.Chat = Chat; exports.Client = Client; exports.Messages = Messages; exports.QstashChatRatelimitError = QstashChatRatelimitError; exports.QstashError = QstashError; exports.QstashRatelimitError = QstashRatelimitError; exports.Receiver = _chunkUUR7N6E6js.Receiver; exports.Schedules = Schedules; exports.SignatureError = _chunkUUR7N6E6js.SignatureError; exports.UrlGroups = UrlGroups; exports.custom = custom; exports.openai = openai; exports.upstash = upstash;