@upstash/qstash 2.5.5 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,12 @@
 
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/qstash)
 
+ > [!NOTE]
+ > **This project is in the GA stage.**
+ >
+ > Upstash Professional Support fully covers this project. It receives regular updates and bug fixes.
+ > The Upstash team is committed to maintaining and improving its functionality.
+
  **QStash** is an HTTP-based messaging and scheduling solution for serverless and
  edge runtimes.
 
@@ -57,7 +62,7 @@ const c = new Client({
 
  const res = await c.publishJSON({
    url: "https://my-api...",
-   // or topic: "the name or id of a topic"
+   // or urlGroup: "the name or id of a url group"
    body: {
      hello: "world",
    },
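Continuing the snippet above, publishing to a URL group under the new naming might look like this (a minimal sketch; the group name is hypothetical, and per the new types the response is an array with one entry per endpoint):

```ts
// Publish to every endpoint in a URL group; resolves to a
// PublishToUrlGroupsResponse (one PublishToUrlResponse per endpoint).
const responses = await c.publishJSON({
  urlGroup: "my-url-group", // hypothetical group name
  body: { hello: "world" },
});
```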
@@ -96,6 +101,58 @@ const isValid = await r.verify({
  })
  ```
 
+ ### Publishing a message to OpenAI or any OpenAI-compatible LLM
+
+ No need for a complicated setup for your LLM requests. We'll call the LLM and schedule it for your serverless needs.
+
+ ```ts
+ import { Client } from "@upstash/qstash";
+
+ const client = new Client({
+   token: "<QSTASH_TOKEN>",
+ });
+
+ const result = await client.publishJSON({
+   llmProvider: "openai", // we currently support "openai" and "togetherai", but QStash works with any OpenAI-compatible API
+   llmToken: "YOUR_TOKEN",
+   url: "OPEN_AI_COMPATIBLE_BASE_URL",
+   body: {
+     model: "gpt-3.5-turbo",
+     messages: [
+       {
+         role: "user",
+         content: "Where is the capital of Turkey?",
+       },
+     ],
+   },
+   callback: "https://oz.requestcatcher.com/",
+ });
+ ```
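The `callback` URL above receives the LLM response asynchronously. A minimal sketch of a handler, assuming QStash's documented callback envelope in which the response body arrives base64-encoded in a `body` field (in production you should also verify the callback's signature):

```ts
// Hypothetical Next.js route handler for the callback URL above.
export async function POST(request: Request) {
  const envelope = await request.json();
  // The LLM response is base64-encoded inside the callback payload.
  const completion = JSON.parse(
    Buffer.from(envelope.body, "base64").toString("utf-8")
  );
  console.log(completion.choices?.[0]?.message?.content);
  return new Response("ok");
}
```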
+
+ ### Chatting with your favorite LLM
+
+ You can easily stream Upstash, OpenAI, or TogetherAI responses from your favorite framework (e.g. Next.js) or library:
+
+ ```ts
+ const response = await client.chat().create({
+   provider: "upstash", // optionally, provider: "openai"
+   model: "meta-llama/Meta-Llama-3-8B-Instruct", // optionally, model: "gpt-3.5-turbo"
+   llmToken: process.env.OPENAI_API_KEY!, // required for the "openai" and "togetherai" providers
+   messages: [
+     {
+       role: "system",
+       content: "from now on, foo is whale",
+     },
+     {
+       role: "user",
+       content: "what exactly is foo?",
+     },
+   ],
+   stream: true,
+   temperature: 0.5,
+ });
+ ```
+
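With `stream: true`, `create` resolves to an async iterable of `ChatCompletionChunk`s. A minimal way to consume it (assuming the OpenAI-style `choices[].delta.content` chunk shape):

```ts
// Print the completion to stdout as chunks arrive.
for await (const chunk of response) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```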
  ## Docs
 
  See [the documentation](https://docs.upstash.com/qstash) for details.
package/index.d.mts CHANGED
@@ -67,9 +67,16 @@ type Event = {
      nextDeliveryTime?: number;
      error?: string;
      url: string;
-     topicName?: string;
+     urlGroup?: string;
      endpointName?: string;
  };
+ type EventPayload = Omit<Event, "urlGroup"> & {
+     topicName: string;
+ };
+ type GetEventsPayload = {
+     cursor?: number;
+     events: EventPayload[];
+ };
  type WithCursor<T> = T & {
      cursor?: number;
  };
@@ -156,9 +163,9 @@ type StreamDisabled = {
      stream: false;
  } | object;
  type StreamParameter = StreamEnabled | StreamDisabled;
- type ChatRequest<TStream extends StreamParameter> = {
-     messages: ChatCompletionMessage[];
-     model: ChatModel;
+ type OpenAIChatModel = "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-vision-preview" | "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-16k-0613";
+ type TogetherAIChatModel = "zero-one-ai/Yi-34B-Chat" | "allenai/OLMo-7B-Instruct" | "allenai/OLMo-7B-Twin-2T" | "allenai/OLMo-7B" | "Austism/chronos-hermes-13b" | "cognitivecomputations/dolphin-2.5-mixtral-8x7b" | "databricks/dbrx-instruct" | "deepseek-ai/deepseek-coder-33b-instruct" | "deepseek-ai/deepseek-llm-67b-chat" | "garage-bAInd/Platypus2-70B-instruct" | "google/gemma-2b-it" | "google/gemma-7b-it" | "Gryphe/MythoMax-L2-13b" | "lmsys/vicuna-13b-v1.5" | "lmsys/vicuna-7b-v1.5" | "codellama/CodeLlama-13b-Instruct-hf" | "codellama/CodeLlama-34b-Instruct-hf" | "codellama/CodeLlama-70b-Instruct-hf" | "codellama/CodeLlama-7b-Instruct-hf" | "meta-llama/Llama-2-70b-chat-hf" | "meta-llama/Llama-2-13b-chat-hf" | "meta-llama/Llama-2-7b-chat-hf" | "meta-llama/Llama-3-8b-chat-hf" | "meta-llama/Llama-3-70b-chat-hf" | "mistralai/Mistral-7B-Instruct-v0.1" | "mistralai/Mistral-7B-Instruct-v0.2" | "mistralai/Mistral-7B-Instruct-v0.3" | "mistralai/Mixtral-8x7B-Instruct-v0.1" | "mistralai/Mixtral-8x22B-Instruct-v0.1" | "NousResearch/Nous-Capybara-7B-V1p9" | "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT" | "NousResearch/Nous-Hermes-llama-2-7b" | "NousResearch/Nous-Hermes-Llama2-13b" | "NousResearch/Nous-Hermes-2-Yi-34B" | "openchat/openchat-3.5-1210" | "Open-Orca/Mistral-7B-OpenOrca" | "Qwen/Qwen1.5-0.5B-Chat" | "Qwen/Qwen1.5-1.8B-Chat" | "Qwen/Qwen1.5-4B-Chat" | "Qwen/Qwen1.5-7B-Chat" | "Qwen/Qwen1.5-14B-Chat" | "Qwen/Qwen1.5-32B-Chat" | "Qwen/Qwen1.5-72B-Chat" | "Qwen/Qwen1.5-110B-Chat" | "Qwen/Qwen2-72B-Instruct" | "snorkelai/Snorkel-Mistral-PairRM-DPO" | "Snowflake/snowflake-arctic-instruct" | "togethercomputer/alpaca-7b" | "teknium/OpenHermes-2-Mistral-7B" | "teknium/OpenHermes-2p5-Mistral-7B" | "togethercomputer/Llama-2-7B-32K-Instruct" | "togethercomputer/RedPajama-INCITE-Chat-3B-v1" | "togethercomputer/RedPajama-INCITE-7B-Chat" | "togethercomputer/StripedHyena-Nous-7B" | "Undi95/ReMM-SLERP-L2-13B" | "Undi95/Toppy-M-7B" | "WizardLM/WizardLM-13B-V1.2" | "upstage/SOLAR-10.7B-Instruct-v1.0";
+ type ChatRequestCommonFields = {
      frequency_penalty?: number;
      logit_bias?: Record<string, number>;
      logprobs?: boolean;
@@ -171,11 +178,30 @@ type ChatRequest<TStream extends StreamParameter> = {
      stop?: string | string[];
      temperature?: number;
      top_p?: number;
- } & TStream;
- type PromptRequest<TStream extends StreamParameter> = Omit<ChatRequest<TStream>, "messages" | "stream"> & {
+ };
+ type PromptChatRequestFields = ChatRequestCommonFields & {
      system: string;
      user: string;
- } & TStream;
+ };
+ type ChatRequestFields = ChatRequestCommonFields & {
+     messages: ChatCompletionMessage[];
+ };
+ type LlmProvider = "openai" | "togetherai";
+ type LlmProviderBaseUrl = "https://api.openai.com" | "https://api.together.xyz";
+ type ChatRequestProviders = {
+     provider: "openai";
+     model: OpenAIChatModel;
+     llmToken: string;
+ } | {
+     provider: "togetherai";
+     model: TogetherAIChatModel;
+     llmToken: string;
+ } | {
+     provider: "upstash";
+     model: ChatModel;
+ };
+ type PromptChatRequest<TStream extends StreamParameter> = ChatRequestProviders & PromptChatRequestFields & TStream;
+ type ChatRequest<TStream extends StreamParameter> = ChatRequestProviders & ChatRequestFields & TStream;
 
  type UpstashRequest = {
      /**
@@ -198,7 +224,7 @@ type UpstashRequest = {
      /**
       * A string to set request's method.
       */
-     method?: "GET" | "POST" | "PUT" | "DELETE";
+     method?: "GET" | "POST" | "PUT" | "DELETE" | "PATCH";
      query?: Record<string, string | number | boolean | undefined>;
      /**
       * if enabled, call `res.json()`
@@ -206,6 +232,7 @@ type UpstashRequest = {
       * @default true
       */
      parseResponseAsJson?: boolean;
+     baseUrl?: string;
  };
  type UpstashResponse<TResult> = TResult & {
      error?: string;
@@ -238,9 +265,9 @@ type Message = {
       */
      messageId: string;
      /**
-      * The topic name if this message was sent to a topic.
+      * The url group name if this message was sent to a url group.
       */
-     topicName?: string;
+     urlGroup?: string;
      /**
       * The url where this message is sent to.
       */
@@ -282,6 +309,9 @@ type Message = {
       */
      queueName?: string;
  };
+ type MessagePayload = Omit<Message, "urlGroup"> & {
+     topicName: string;
+ };
  declare class Messages {
      private readonly http;
      constructor(http: Requester);
@@ -330,9 +360,22 @@ type QueueResponse = {
      name: string;
      parallelism: number;
      lag: number;
+     paused?: boolean;
  };
  type UpsertQueueRequest = {
-     parallelism: number;
+     /**
+      * The number of parallel consumers consuming from the queue.
+      *
+      * @default 1
+      */
+     parallelism?: number;
+     /**
+      * Whether to pause the queue or not. A paused queue will not
+      * deliver new messages until it is resumed.
+      *
+      * @default false
+      */
+     paused?: boolean;
  };
  declare class Queue {
      private readonly http;
@@ -362,6 +405,17 @@ declare class Queue {
       * Enqueue a message to a queue, serializing the body to JSON.
       */
      enqueueJSON<TBody = unknown, TRequest extends PublishRequest<TBody> = PublishRequest<TBody>>(request: TRequest): Promise<PublishResponse<TRequest>>;
+     /**
+      * Pauses the queue.
+      *
+      * A paused queue will not deliver messages until
+      * it is resumed.
+      */
+     pause(): Promise<void>;
+     /**
+      * Resumes the queue.
+      */
+     resume(): Promise<void>;
  }
 
  type Schedule = {
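A minimal sketch of the new queue controls (assuming the existing `client.queue({ queueName })` accessor and an `upsert` method taking `UpsertQueueRequest`; the queue name is hypothetical):

```ts
import { Client } from "@upstash/qstash";

const client = new Client({ token: "<QSTASH_TOKEN>" });
const queue = client.queue({ queueName: "my-queue" }); // hypothetical name

// Create or update the queue: 2 parallel consumers, initially paused.
await queue.upsert({ parallelism: 2, paused: true });

// A paused queue accepts messages but does not deliver them...
await queue.enqueueJSON({ url: "https://my-api...", body: { hello: "world" } });

// ...until it is resumed.
await queue.resume();
```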
@@ -376,10 +430,11 @@ type Schedule = {
      delay?: number;
      callback?: string;
      failureCallback?: string;
+     isPaused: true | undefined;
  };
  type CreateScheduleRequest = {
      /**
-      * Either a URL or topic name
+      * Either a URL or urlGroup name
       */
      destination: string;
      /**
@@ -441,6 +496,17 @@ type CreateScheduleRequest = {
       * Specify a cron expression to repeatedly send this message to the destination.
       */
      cron: string;
+     /**
+      * The HTTP timeout value to use while calling the destination URL.
+      * When a timeout is specified, it is used instead of the maximum timeout
+      * value permitted by the QStash plan. It is useful in scenarios where a message
+      * should be delivered with a shorter timeout.
+      *
+      * In seconds.
+      *
+      * @default undefined
+      */
+     timeout?: number;
  };
  declare class Schedules {
      private readonly http;
@@ -463,6 +529,21 @@ declare class Schedules {
       * Delete a schedule
       */
      delete(scheduleId: string): Promise<void>;
+     /**
+      * Pauses the schedule.
+      *
+      * A paused schedule will not deliver messages until
+      * it is resumed.
+      */
+     pause({ schedule }: {
+         schedule: string;
+     }): Promise<void>;
+     /**
+      * Resumes the schedule.
+      */
+     resume({ schedule }: {
+         schedule: string;
+     }): Promise<void>;
  }
 
  type Endpoint = {
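The schedule-level `timeout` and the new pause/resume calls might be used like this (a sketch; the destination and cron values are placeholders, and it assumes `create` resolves with a `scheduleId`):

```ts
import { Client } from "@upstash/qstash";

const client = new Client({ token: "<QSTASH_TOKEN>" });

// Create a schedule with a per-call HTTP timeout of 30 seconds.
const { scheduleId } = await client.schedules.create({
  destination: "https://my-api...",
  cron: "*/5 * * * *",
  timeout: 30, // seconds; used instead of the plan's maximum timeout
});

// Temporarily stop deliveries, then resume them later.
await client.schedules.pause({ schedule: scheduleId });
await client.schedules.resume({ schedule: scheduleId });
```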
@@ -477,7 +558,7 @@ type Endpoint = {
  };
  type AddEndpointsRequest = {
      /**
-      * The name of the topic.
+      * The name of the url group.
       * Must be unique and only contain alphanumeric, hyphen, underscore and periods.
       */
      name: string;
@@ -485,7 +566,7 @@ type AddEndpointsRequest = {
  };
  type RemoveEndpointsRequest = {
      /**
-      * The name of the topic.
+      * The name of the url group.
       * Must be unique and only contain alphanumeric, hyphen, underscore and periods.
       */
      name: string;
@@ -497,7 +578,7 @@ type RemoveEndpointsRequest = {
          url: string;
      })[];
  };
- type Topic = {
+ type UrlGroup = {
      /**
       * A unix timestamp (milliseconds)
       */
@@ -507,7 +588,7 @@ type Topic = {
       */
      updatedAt: number;
      /**
-      * The name of this topic.
+      * The name of this url group.
       */
      name: string;
      /**
@@ -515,27 +596,27 @@ type Topic = {
       */
      endpoints: Endpoint[];
  };
- declare class Topics {
+ declare class UrlGroups {
      private readonly http;
      constructor(http: Requester);
      /**
-      * Create a new topic with the given name and endpoints
+      * Create a new url group with the given name and endpoints
       */
      addEndpoints(request: AddEndpointsRequest): Promise<void>;
      /**
-      * Remove endpoints from a topic.
+      * Remove endpoints from a url group.
       */
      removeEndpoints(request: RemoveEndpointsRequest): Promise<void>;
      /**
-      * Get a list of all topics.
+      * Get a list of all url groups.
       */
-     list(): Promise<Topic[]>;
+     list(): Promise<UrlGroup[]>;
      /**
-      * Get a single topic
+      * Get a single url group
       */
-     get(name: string): Promise<Topic>;
+     get(name: string): Promise<UrlGroup>;
      /**
-      * Delete a topic
+      * Delete a url group
       */
      delete(name: string): Promise<void>;
  }
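Usage mirrors the old `client.topics` accessor under the new `client.urlGroups` name (a sketch; the group name and endpoint URL are placeholders):

```ts
import { Client } from "@upstash/qstash";

const client = new Client({ token: "<QSTASH_TOKEN>" });

// Create (or extend) a URL group, then fan messages out to it.
await client.urlGroups.addEndpoints({
  name: "my-url-group",
  endpoints: [{ url: "https://my-api..." }],
});

const groups = await client.urlGroups.list();
console.log(groups.map((g) => g.name));

await client.urlGroups.delete("my-url-group");
```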
@@ -554,6 +635,16 @@ declare class Chat {
       * @returns Chat completion or stream
       */
      create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+     /**
+      * Calls the Upstash completions api given a ChatRequest.
+      *
+      * Returns a ChatCompletion or a stream of ChatCompletionChunks
+      * if stream is enabled.
+      *
+      * @param request ChatRequest with messages
+      * @returns Chat completion or stream
+      */
+     private createThirdParty;
      /**
       * Calls the Upstash completions api given a PromptRequest.
       *
@@ -565,7 +656,7 @@ declare class Chat {
       * mistralai/Mistral-7B-Instruct-v0.2 model.
       * @returns Chat completion or stream
       */
-     prompt: <TStream extends StreamParameter>(request: PromptRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+     prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
  }
 
  type ClientConfig = {
@@ -684,27 +775,56 @@ type PublishRequest<TBody = BodyInit> = {
       * @default `POST`
       */
      method?: "GET" | "POST" | "PUT" | "DELETE" | "PATCH";
+     /**
+      * The HTTP timeout value to use while calling the destination URL.
+      * When a timeout is specified, it is used instead of the maximum timeout
+      * value permitted by the QStash plan. It is useful in scenarios where a message
+      * should be delivered with a shorter timeout.
+      *
+      * In seconds.
+      *
+      * @default undefined
+      */
+     timeout?: number;
  } & ({
      /**
       * The url where the message should be sent to.
       */
      url: string;
-     topic?: never;
+     urlGroup?: never;
      api?: never;
+     llmProvider?: never;
  } | {
      url?: never;
      /**
-      * The topic the message should be sent to.
+      * The url group the message should be sent to.
       */
-     topic: string;
+     urlGroup: string;
      api?: never;
+     llmProvider?: never;
  } | {
      url?: never;
-     topic?: never;
+     urlGroup?: never;
      /**
       * The api endpoint the request should be sent to.
       */
      api: "llm";
+     llmProvider?: never;
+ } | {
+     /**
+      * 3rd party provider url such as OpenAI: https://api.openai.com/v1/chat/completions
+      */
+     url?: string;
+     urlGroup?: never;
+     api?: never;
+     /**
+      * 3rd party provider name such as OpenAI, TogetherAI
+      */
+     llmProvider: LlmProvider;
+     /**
+      * 3rd party provider secret key
+      */
+     llmToken?: string;
  });
  type PublishJsonRequest = Omit<PublishRequest, "body"> & {
      /**
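The new per-message `timeout` applies to the call to the destination, not to publishing itself. A minimal sketch, grounded in the `PublishRequest` shape above (the URL is a placeholder):

```ts
import { Client } from "@upstash/qstash";

const client = new Client({ token: "<QSTASH_TOKEN>" });

// Fail the delivery attempt if the destination does not respond
// within 10 seconds, regardless of the plan's maximum timeout.
await client.publishJSON({
  url: "https://my-api...",
  body: { hello: "world" },
  timeout: 10, // seconds
});
```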
@@ -721,7 +841,7 @@ type EventsRequestFilter = {
      messageId?: string;
      state?: State;
      url?: string;
-     topicName?: string;
+     urlGroup?: string;
      scheduleId?: string;
      queueName?: string;
      fromDate?: number;
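Correspondingly, event filtering now uses `urlGroup` in place of `topicName`. A sketch, assuming the client's `events()` method accepts an optional `filter` of the `EventsRequestFilter` shape above (the group name is hypothetical):

```ts
import { Client } from "@upstash/qstash";

const client = new Client({ token: "<QSTASH_TOKEN>" });

// List recent events for messages published to a URL group.
const { events } = await client.events({
  filter: { urlGroup: "my-url-group", state: "DELIVERED" },
});
```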
@@ -739,11 +859,11 @@ declare class Client {
      http: Requester;
      constructor(config: ClientConfig);
      /**
-      * Access the topic API.
+      * Access the urlGroup API.
       *
-      * Create, read, update or delete topics.
+      * Create, read, update or delete urlGroups.
       */
-     get topics(): Topics;
+     get urlGroups(): UrlGroups;
      /**
       * Access the dlq API.
       *
@@ -816,12 +936,12 @@ type PublishToUrlResponse = PublishToApiResponse & {
      url: string;
      deduplicated?: boolean;
  };
- type PublishToTopicResponse = PublishToUrlResponse[];
+ type PublishToUrlGroupsResponse = PublishToUrlResponse[];
  type PublishResponse<TRequest> = TRequest extends {
      url: string;
  } ? PublishToUrlResponse : TRequest extends {
-     topic: string;
- } ? PublishToTopicResponse : PublishToApiResponse;
+     urlGroup: string;
+ } ? PublishToUrlGroupsResponse : PublishToApiResponse;
 
  /**
   * Result of 500 Internal Server Error
836
956
  constructor(args: ChatRateLimit);
837
957
  }
838
958
 
839
- export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventsRequest, type GetEventsResponse, type HeadersInit, type Message, Messages, type PromptRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToTopicResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type Topic, Topics, type VerifyRequest, type WithCursor };
959
+ export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventPayload, type EventsRequest, type GetEventsPayload, type GetEventsResponse, type HeadersInit, type LlmProvider, type LlmProviderBaseUrl, type Message, type MessagePayload, Messages, type OpenAIChatModel, type PromptChatRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToUrlGroupsResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type TogetherAIChatModel, type UrlGroup, UrlGroups, type VerifyRequest, type WithCursor };