@upstash/qstash 2.6.0 → 2.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (6)
  1. package/README.md +7 -9
  2. package/index.d.mts +206 -64
  3. package/index.d.ts +206 -64
  4. package/index.js +244 -148
  5. package/index.mjs +239 -143
  6. package/package.json +1 -1
package/README.md CHANGED
@@ -3,7 +3,6 @@
 ![npm (scoped)](https://img.shields.io/npm/v/@upstash/qstash)
 
 > [!NOTE] > **This project is in GA Stage.**
->
 > The Upstash Professional Support fully covers this project. It receives regular updates, and bug fixes.
 > The Upstash team is committed to maintaining and improving its functionality.
 
@@ -42,7 +41,7 @@ npm install @upstash/qstash
 
 ### Get your authorization token
 
-Go to [upstash](https://console.upstash.com/qstash) and copy the token.
+Go to [Upstash Console](https://console.upstash.com/qstash) and copy the QSTASH_TOKEN.
 
 ## Basic Usage:
 
@@ -106,16 +105,14 @@ const isValid = await r.verify({
 No need for complicated setup your LLM request. We'll call LLM and schedule it for your serverless needs.
 
 ```ts
-import { Client } from "@upstash/qstash";
+import { Client, openai } from "@upstash/qstash";
 
 const c = new Client({
   token: "<QSTASH_TOKEN>",
 });
 
 const result = await client.publishJSON({
-  llmProvider: "openai", // We currently support open-ai and together-ai, but QStash will work with any OpenAI compatible API
-  llmToken: "YOUR_TOKEN",
-  url: "OPEN_AI_COMPATIBLE_BASE_URL",
+  api: { name: "llm", provider: openai({ token: process.env.OPENAI_API_KEY! }) },
  body: {
    model: "gpt-3.5-turbo",
    messages: [
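Taken together with the surrounding context, the new publish call looks roughly like the following; a minimal sketch assuming a `Client` built as in the README, with the callback URL and prompt as illustrative placeholders rather than values from the diff:

```ts
// Sketch of the new api/provider publish surface in 2.6.2.
import { Client, openai } from "@upstash/qstash";

const client = new Client({ token: process.env.QSTASH_TOKEN! });

const result = await client.publishJSON({
  // Route the message through QStash's LLM api using the new provider helper
  api: { name: "llm", provider: openai({ token: process.env.OPENAI_API_KEY! }) },
  body: {
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Write a haiku about queues." }],
  },
  callback: "https://example.com/llm-result", // hypothetical endpoint that receives the completion
});
```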
@@ -131,13 +128,14 @@ const result = await client.publishJSON({
 
 ### Chatting with your favorite LLM
 
-You can easily start streaming Upstash, OpenAI or TogetherAI responses from your favorite framework(Next.js) or library
+You can easily start streaming Upstash or OpenAI responses from your favorite framework(Next.js) or library
 
 ```ts
+import { upstash } from "@upstash/qstash";
+
 const response = await client.chat().create({
-  provider: "upstash", // Optionally, provider: "openai"
+  provider: upstash(), // Optionally, provider: "custom({token: "XXX", baseUrl: "https://api.openai.com"})". This will allow you to call every OpenAI compatible API out there.
   model: "meta-llama/Meta-Llama-3-8B-Instruct", // Optionally, model: "gpt-3.5-turbo",
-  llmToken: process.env.OPENAI_API_KEY!,
  messages: [
    {
      role: "system",
package/index.d.mts CHANGED
@@ -68,7 +68,10 @@ type Event = {
     error?: string;
     url: string;
     urlGroup?: string;
+    topicName?: string;
     endpointName?: string;
+    header?: Record<string, string>;
+    body?: string;
 };
 type EventPayload = Omit<Event, "urlGroup"> & {
     topicName: string;
@@ -99,6 +102,32 @@ type RateLimit = {
     reset: string | null;
 };
 
+type ProviderReturnType = {
+    owner: "upstash" | "openai" | "custom";
+    baseUrl: string;
+    token: string;
+};
+declare const upstash: () => {
+    owner: "upstash";
+    baseUrl: "https://qstash.upstash.io/llm";
+    token: string;
+};
+declare const openai: ({ token, }: {
+    token: string;
+}) => {
+    owner: "openai";
+    baseUrl: "https://api.openai.com";
+    token: string;
+};
+declare const custom: ({ baseUrl, token, }: {
+    token: string;
+    baseUrl: string;
+}) => {
+    owner: "custom";
+    baseUrl: string;
+    token: string;
+};
+
 type ChatCompletionMessage = {
     role: "system" | "assistant" | "user";
     content: string;
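The three new factories all return the same `ProviderReturnType` shape, differing only in `owner` and `baseUrl`; a quick sketch, with placeholder tokens and a hypothetical endpoint:

```ts
import { upstash, openai, custom } from "@upstash/qstash";

const a = upstash();                   // owner "upstash", baseUrl "https://qstash.upstash.io/llm"
const b = openai({ token: "sk-..." }); // owner "openai", baseUrl "https://api.openai.com"
const c = custom({
  token: "xxx",                        // placeholder credential
  baseUrl: "https://llm.example.com",  // any OpenAI-compatible endpoint
});
```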
@@ -164,7 +193,6 @@ type StreamDisabled = {
 } | object;
 type StreamParameter = StreamEnabled | StreamDisabled;
 type OpenAIChatModel = "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-vision-preview" | "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-16k-0613";
-type TogetherAIChatModel = "zero-one-ai/Yi-34B-Chat" | "allenai/OLMo-7B-Instruct" | "allenai/OLMo-7B-Twin-2T" | "allenai/OLMo-7B" | "Austism/chronos-hermes-13b" | "cognitivecomputations/dolphin-2.5-mixtral-8x7b" | "databricks/dbrx-instruct" | "deepseek-ai/deepseek-coder-33b-instruct" | "deepseek-ai/deepseek-llm-67b-chat" | "garage-bAInd/Platypus2-70B-instruct" | "google/gemma-2b-it" | "google/gemma-7b-it" | "Gryphe/MythoMax-L2-13b" | "lmsys/vicuna-13b-v1.5" | "lmsys/vicuna-7b-v1.5" | "codellama/CodeLlama-13b-Instruct-hf" | "codellama/CodeLlama-34b-Instruct-hf" | "codellama/CodeLlama-70b-Instruct-hf" | "codellama/CodeLlama-7b-Instruct-hf" | "meta-llama/Llama-2-70b-chat-hf" | "meta-llama/Llama-2-13b-chat-hf" | "meta-llama/Llama-2-7b-chat-hf" | "meta-llama/Llama-3-8b-chat-hf" | "meta-llama/Llama-3-70b-chat-hf" | "mistralai/Mistral-7B-Instruct-v0.1" | "mistralai/Mistral-7B-Instruct-v0.2" | "mistralai/Mistral-7B-Instruct-v0.3" | "mistralai/Mixtral-8x7B-Instruct-v0.1" | "mistralai/Mixtral-8x22B-Instruct-v0.1" | "NousResearch/Nous-Capybara-7B-V1p9" | "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT" | "NousResearch/Nous-Hermes-llama-2-7b" | "NousResearch/Nous-Hermes-Llama2-13b" | "NousResearch/Nous-Hermes-2-Yi-34B" | "openchat/openchat-3.5-1210" | "Open-Orca/Mistral-7B-OpenOrca" | "Qwen/Qwen1.5-0.5B-Chat" | "Qwen/Qwen1.5-1.8B-Chat" | "Qwen/Qwen1.5-4B-Chat" | "Qwen/Qwen1.5-7B-Chat" | "Qwen/Qwen1.5-14B-Chat" | "Qwen/Qwen1.5-32B-Chat" | "Qwen/Qwen1.5-72B-Chat" | "Qwen/Qwen1.5-110B-Chat" | "Qwen/Qwen2-72B-Instruct" | "snorkelai/Snorkel-Mistral-PairRM-DPO" | "Snowflake/snowflake-arctic-instruct" | "togethercomputer/alpaca-7b" | "teknium/OpenHermes-2-Mistral-7B" | "teknium/OpenHermes-2p5-Mistral-7B" | "togethercomputer/Llama-2-7B-32K-Instruct" | "togethercomputer/RedPajama-INCITE-Chat-3B-v1" | "togethercomputer/RedPajama-INCITE-7B-Chat" | "togethercomputer/StripedHyena-Nous-7B" | "Undi95/ReMM-SLERP-L2-13B" | "Undi95/Toppy-M-7B" | "WizardLM/WizardLM-13B-V1.2" | "upstage/SOLAR-10.7B-Instruct-v1.0";
 type ChatRequestCommonFields = {
     frequency_penalty?: number;
     logit_bias?: Record<string, number>;
@@ -186,18 +214,14 @@ type PromptChatRequestFields = ChatRequestCommonFields & {
 type ChatRequestFields = ChatRequestCommonFields & {
     messages: ChatCompletionMessage[];
 };
-type LlmProvider = "openai" | "togetherai";
-type LlmProviderBaseUrl = "https://api.openai.com" | "https://api.together.xyz";
 type ChatRequestProviders = {
-    provider: "openai";
+    provider: ProviderReturnType;
     model: OpenAIChatModel;
-    llmToken: string;
 } | {
-    provider: "togetherai";
-    model: TogetherAIChatModel;
-    llmToken: string;
+    provider: ProviderReturnType;
+    model: string;
 } | {
-    provider: "upstash";
+    provider: ProviderReturnType;
     model: ChatModel;
 };
 type PromptChatRequest<TStream extends StreamParameter> = ChatRequestProviders & PromptChatRequestFields & TStream;
@@ -268,10 +292,23 @@ type Message = {
      * The url group name if this message was sent to a urlGroup.
      */
     urlGroup?: string;
+    /**
+     * Deprecated. The topic name if this message was sent to a urlGroup. Use urlGroup instead
+     */
+    topicName?: string;
     /**
      * The url where this message is sent to.
      */
     url: string;
+    /**
+     * The endpoint name of the message if the endpoint is given a
+     * name within the url group.
+     */
+    endpointName?: string;
+    /**
+     * The api name if this message was sent to an api
+     */
+    api?: string;
     /**
      * The http method used to deliver the message
      */
@@ -284,6 +321,11 @@ type Message = {
      * The http body sent to your API
      */
     body?: string;
+    /**
+     * The base64 encoded body if the body contains non-UTF-8 characters,
+     * `None` otherwise.
+     */
+    bodyBase64?: string;
     /**
      * Maxmimum number of retries.
      */
@@ -308,6 +350,14 @@ type Message = {
      * The queue name if this message was sent to a queue.
      */
     queueName?: string;
+    /**
+     * The scheduleId of the message if the message is triggered by a schedule
+     */
+    scheduleId?: string;
+    /**
+     * IP address of the publisher of this message
+     */
+    callerIp?: string;
 };
 type MessagePayload = Omit<Message, "urlGroup"> & {
     topicName: string;
@@ -323,10 +373,75 @@ declare class Messages {
      * Cancel a message
      */
     delete(messageId: string): Promise<void>;
+    deleteMany(messageIds: string[]): Promise<number>;
+    deleteAll(): Promise<number>;
 }
 
 type DlqMessage = Message & {
+    /**
+     * The unique id within the DLQ
+     */
     dlqId: string;
+    /**
+     * The HTTP status code of the last failed delivery attempt
+     */
+    responseStatus?: number;
+    /**
+     * The response headers of the last failed delivery attempt
+     */
+    responseHeader?: Record<string, string[]>;
+    /**
+     * The response body of the last failed delivery attempt if it is
+     * composed of UTF-8 characters only, `None` otherwise.
+     */
+    responseBody?: string;
+    /**
+     * The base64 encoded response body of the last failed delivery attempt
+     * if the response body contains non-UTF-8 characters, `None` otherwise.
+     */
+    responseBodyBase64?: string;
+};
+type DLQFilter = {
+    /**
+     * Filter DLQ entries by message id
+     */
+    messageId?: string;
+    /**
+     * Filter DLQ entries by url
+     */
+    url?: string;
+    /**
+     * Filter DLQ entries by url group name
+     */
+    urlGroup?: string;
+    /**
+     * Filter DLQ entries by api name
+     */
+    api?: string;
+    /**
+     * Filter DLQ entries by queue name
+     */
+    queueName?: string;
+    /**
+     * Filter DLQ entries by schedule id
+     */
+    scheduleId?: string;
+    /**
+     * Filter DLQ entries by starting time, in milliseconds
+     */
+    fromDate?: number;
+    /**
+     * Filter DLQ entries by ending time, in milliseconds
+     */
+    toDate?: number;
+    /**
+     * Filter DLQ entries by HTTP status of the response
+     */
+    responseStatus?: number;
+    /**
+     * Filter DLQ entries by IP address of the publisher of the message
+     */
+    callerIp?: string;
 };
 declare class DLQ {
     private readonly http;
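The new `deleteMany` and `deleteAll` both resolve to the number of messages removed; a short sketch, assuming the client's `messages` accessor and placeholder message ids:

```ts
// Bulk-cancel queued messages (ids are placeholders).
const removed = await client.messages.deleteMany(["msg_123", "msg_456"]);
console.log(`removed ${removed} messages`);

// Or clear everything currently queued:
const total = await client.messages.deleteAll();
```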
@@ -336,6 +451,8 @@ declare class DLQ {
      */
     listMessages(options?: {
         cursor?: string;
+        count?: number;
+        filter?: DLQFilter;
     }): Promise<{
         messages: DlqMessage[];
         cursor?: string;
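With the new `count` and `filter` options, DLQ listings can be narrowed server-side; a sketch with placeholder filter values:

```ts
// Page through DLQ entries that failed with HTTP 500 in the last 24 hours.
const { messages, cursor } = await client.dlq.listMessages({
  count: 50,
  filter: {
    url: "https://example.com/my-endpoint", // placeholder target url
    responseStatus: 500,
    fromDate: Date.now() - 24 * 60 * 60 * 1000, // milliseconds, per the DLQFilter docs
  },
});
```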
@@ -354,6 +471,45 @@
     }>;
 }
 
+declare class Chat {
+    private http;
+    private token;
+    constructor(http: Requester, token: string);
+    private static toChatRequest;
+    /**
+     * Calls the Upstash completions api given a ChatRequest.
+     *
+     * Returns a ChatCompletion or a stream of ChatCompletionChunks
+     * if stream is enabled.
+     *
+     * @param request ChatRequest with messages
+     * @returns Chat completion or stream
+     */
+    create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+    /**
+     * Calls the Upstash completions api given a ChatRequest.
+     *
+     * Returns a ChatCompletion or a stream of ChatCompletionChunks
+     * if stream is enabled.
+     *
+     * @param request ChatRequest with messages
+     * @returns Chat completion or stream
+     */
+    private createThirdParty;
+    /**
+     * Calls the Upstash completions api given a PromptRequest.
+     *
+     * Returns a ChatCompletion or a stream of ChatCompletionChunks
+     * if stream is enabled.
+     *
+     * @param request PromptRequest with system and user messages.
+     * Note that system parameter shouldn't be passed in the case of
+     * mistralai/Mistral-7B-Instruct-v0.2 model.
+     * @returns Chat completion or stream
+     */
+    prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+}
+
 type QueueResponse = {
     createdAt: number;
     updatedAt: number;
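`prompt()` mirrors `create()` but, per its doc comment, takes flat system and user strings rather than a messages array; a sketch assuming `system`/`user` field names, which the diff implies but does not show:

```ts
// Sketch of chat().prompt(); field names follow the doc comment above.
import { upstash } from "@upstash/qstash";

const completion = await client.chat().prompt({
  provider: upstash(),
  model: "meta-llama/Meta-Llama-3-8B-Instruct",
  system: "You are a helpful assistant.", // omit for mistralai/Mistral-7B-Instruct-v0.2
  user: "Explain what a dead letter queue is.",
});
```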
@@ -426,10 +582,12 @@ type Schedule = {
     method: string;
     header?: Record<string, string[]>;
     body?: string;
+    bodyBase64?: string;
     retries: number;
     delay?: number;
     callback?: string;
     failureCallback?: string;
+    callerIp?: string;
     isPaused: true | undefined;
 };
 type CreateScheduleRequest = {
@@ -621,44 +779,6 @@ declare class UrlGroups {
     delete(name: string): Promise<void>;
 }
 
-declare class Chat {
-    private http;
-    constructor(http: Requester);
-    private static toChatRequest;
-    /**
-     * Calls the Upstash completions api given a ChatRequest.
-     *
-     * Returns a ChatCompletion or a stream of ChatCompletionChunks
-     * if stream is enabled.
-     *
-     * @param request ChatRequest with messages
-     * @returns Chat completion or stream
-     */
-    create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
-    /**
-     * Calls the Upstash completions api given a ChatRequest.
-     *
-     * Returns a ChatCompletion or a stream of ChatCompletionChunks
-     * if stream is enabled.
-     *
-     * @param request ChatRequest with messages
-     * @returns Chat completion or stream
-     */
-    private createThirdParty;
-    /**
-     * Calls the Upstash completions api given a PromptRequest.
-     *
-     * Returns a ChatCompletion or a stream of ChatCompletionChunks
-     * if stream is enabled.
-     *
-     * @param request PromptRequest with system and user messages.
-     * Note that system parameter shouldn't be passed in the case of
-     * mistralai/Mistral-7B-Instruct-v0.2 model.
-     * @returns Chat completion or stream
-     */
-    prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
-}
-
 type ClientConfig = {
     /**
      * Url of the qstash api server.
@@ -793,7 +913,7 @@ type PublishRequest<TBody = BodyInit> = {
     url: string;
     urlGroup?: never;
     api?: never;
-    llmProvider?: never;
+    topic?: never;
 } | {
     url?: never;
     /**
@@ -801,30 +921,26 @@
      */
     urlGroup: string;
     api?: never;
-    llmProvider?: never;
+    topic?: never;
 } | {
-    url?: never;
+    url?: string;
     urlGroup?: never;
     /**
      * The api endpoint the request should be sent to.
      */
-    api: "llm";
-    llmProvider?: never;
+    api: {
+        name: "llm";
+        provider?: ProviderReturnType;
+    };
+    topic?: never;
 } | {
-    /**
-     * 3rd party provider url such as OpenAI: https://api.openai.com/v1/chat/completions
-     */
-    url?: string;
+    url?: never;
     urlGroup?: never;
-    api?: never;
-    /**
-     * 3rd party provider name such as OpenAI, TogetherAI
-     */
-    llmProvider: LlmProvider;
+    api: never;
     /**
-     * 3rd party provider secret key
+     * Deprecated. The topic the message should be sent to. Same as urlGroup
      */
-    llmToken?: string;
+    topic?: string;
 });
 type PublishJsonRequest = Omit<PublishRequest, "body"> & {
     /**
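The reshaped union still enforces exactly one destination per publish, with `topic` kept as a deprecated alias for `urlGroup`; a sketch with placeholder destinations:

```ts
// Each call picks exactly one destination; mixing them is a type error.
await client.publishJSON({ url: "https://example.com/endpoint", body: { hello: "world" } });
await client.publishJSON({ urlGroup: "my-url-group", body: { hello: "world" } });
await client.publishJSON({ topic: "my-url-group", body: { hello: "world" } }); // deprecated alias
```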
@@ -842,6 +958,8 @@ type EventsRequestFilter = {
     state?: State;
     url?: string;
     urlGroup?: string;
+    topicName?: string;
+    api?: string;
     scheduleId?: string;
     queueName?: string;
     fromDate?: number;
@@ -857,6 +975,7 @@ type QueueRequest = {
 };
 declare class Client {
     http: Requester;
+    private token;
     constructor(config: ClientConfig);
     /**
      * Access the urlGroup API.
@@ -864,6 +983,14 @@ declare class Client {
      * Create, read, update or delete urlGroups.
      */
     get urlGroups(): UrlGroups;
+    /**
+     * Deprecated. Use urlGroups instead.
+     *
+     * Access the topic API.
+     *
+     * Create, read, update or delete topics.
+     */
+    get topics(): UrlGroups;
     /**
      * Access the dlq API.
      *
@@ -950,10 +1077,25 @@ declare class QstashError extends Error {
     constructor(message: string);
 }
 declare class QstashRatelimitError extends QstashError {
+    limit: string | null;
+    remaining: string | null;
+    reset: string | null;
     constructor(args: RateLimit);
 }
 declare class QstashChatRatelimitError extends QstashError {
+    limitRequests: string | null;
+    limitTokens: string | null;
+    remainingRequests: string | null;
+    remainingTokens: string | null;
+    resetRequests: string | null;
+    resetTokens: string | null;
     constructor(args: ChatRateLimit);
 }
+declare class QstashDailyRatelimitError extends QstashError {
+    limit: string | null;
+    remaining: string | null;
+    reset: string | null;
+    constructor(args: RateLimit);
+}
 
-export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventPayload, type EventsRequest, type GetEventsPayload, type GetEventsResponse, type HeadersInit, type LlmProvider, type LlmProviderBaseUrl, type Message, type MessagePayload, Messages, type OpenAIChatModel, type PromptChatRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToUrlGroupsResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type TogetherAIChatModel, type UrlGroup, UrlGroups, type VerifyRequest, type WithCursor };
+export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventPayload, type EventsRequest, type GetEventsPayload, type GetEventsResponse, type HeadersInit, type Message, type MessagePayload, Messages, type OpenAIChatModel, type PromptChatRequest, type ProviderReturnType, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToUrlGroupsResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashDailyRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type UrlGroup, UrlGroups, type VerifyRequest, type WithCursor, custom, openai, upstash };
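Since the ratelimit errors now expose their quota fields, callers can branch on them instead of parsing messages; a sketch using the new `QstashDailyRatelimitError`, with a placeholder endpoint:

```ts
import { Client, QstashDailyRatelimitError, QstashRatelimitError } from "@upstash/qstash";

const client = new Client({ token: process.env.QSTASH_TOKEN! });

try {
  await client.publishJSON({ url: "https://example.com/endpoint", body: {} });
} catch (error) {
  if (error instanceof QstashDailyRatelimitError) {
    // Daily quota exhausted; `reset` reports when it replenishes.
    console.warn(`daily limit ${error.limit} hit, resets at ${error.reset}`);
  } else if (error instanceof QstashRatelimitError) {
    console.warn(`rate limited, ${error.remaining} requests remaining`);
  } else {
    throw error;
  }
}
```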