@upstash/qstash 2.6.0 → 2.7.0-canary

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (6) hide show
  1. package/README.md +7 -9
  2. package/index.d.mts +168 -68
  3. package/index.d.ts +168 -68
  4. package/index.js +184 -144
  5. package/index.mjs +179 -139
  6. package/package.json +1 -1
package/README.md CHANGED
@@ -3,7 +3,6 @@
3
3
  ![npm (scoped)](https://img.shields.io/npm/v/@upstash/qstash)
4
4
 
5
5
  > [!NOTE] > **This project is in GA Stage.**
6
- >
7
6
  > The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes.
8
7
  > The Upstash team is committed to maintaining and improving its functionality.
9
8
 
@@ -42,7 +41,7 @@ npm install @upstash/qstash
42
41
 
43
42
  ### Get your authorization token
44
43
 
45
- Go to [upstash](https://console.upstash.com/qstash) and copy the token.
44
+ Go to [Upstash Console](https://console.upstash.com/qstash) and copy the QSTASH_TOKEN.
46
45
 
47
46
  ## Basic Usage:
48
47
 
@@ -106,16 +105,14 @@ const isValid = await r.verify({
106
105
  No need for a complicated setup for your LLM request. We'll call the LLM and schedule it for your serverless needs.
107
106
 
108
107
  ```ts
109
- import { Client } from "@upstash/qstash";
108
+ import { Client, openai } from "@upstash/qstash";
110
109
 
111
110
  const c = new Client({
112
111
  token: "<QSTASH_TOKEN>",
113
112
  });
114
113
 
115
114
  const result = await client.publishJSON({
116
- llmProvider: "openai", // We currently support open-ai and together-ai, but QStash will work with any OpenAI compatible API
117
- llmToken: "YOUR_TOKEN",
118
- url: "OPEN_AI_COMPATIBLE_BASE_URL",
115
+ api: { name: "llm", provider: openai({ token: process.env.OPENAI_API_KEY! }) },
119
116
  body: {
120
117
  model: "gpt-3.5-turbo",
121
118
  messages: [
@@ -131,13 +128,14 @@ const result = await client.publishJSON({
131
128
 
132
129
  ### Chatting with your favorite LLM
133
130
 
134
- You can easily start streaming Upstash, OpenAI or TogetherAI responses from your favorite framework(Next.js) or library
131
+ You can easily start streaming Upstash or OpenAI responses from your favorite framework (Next.js) or library
135
132
 
136
133
  ```ts
134
+ import { upstash } from "@upstash/qstash";
135
+
137
136
  const response = await client.chat().create({
138
- provider: "upstash", // Optionally, provider: "openai"
137
+ provider: upstash(), // Optionally, provider: custom({ token: "XXX", baseUrl: "https://api.openai.com" }). This will allow you to call every OpenAI-compatible API out there.
139
138
  model: "meta-llama/Meta-Llama-3-8B-Instruct", // Optionally, model: "gpt-3.5-turbo",
140
- llmToken: process.env.OPENAI_API_KEY!,
141
139
  messages: [
142
140
  {
143
141
  role: "system",
package/index.d.mts CHANGED
@@ -69,6 +69,8 @@ type Event = {
69
69
  url: string;
70
70
  urlGroup?: string;
71
71
  endpointName?: string;
72
+ header?: Record<string, string>;
73
+ body?: string;
72
74
  };
73
75
  type EventPayload = Omit<Event, "urlGroup"> & {
74
76
  topicName: string;
@@ -99,6 +101,32 @@ type RateLimit = {
99
101
  reset: string | null;
100
102
  };
101
103
 
104
+ type ProviderReturnType = {
105
+ owner: "upstash" | "openai" | "custom";
106
+ baseUrl: string;
107
+ token: string;
108
+ };
109
+ declare const upstash: () => {
110
+ owner: "upstash";
111
+ baseUrl: "https://qstash.upstash.io/llm";
112
+ token: string;
113
+ };
114
+ declare const openai: ({ token, }: {
115
+ token: string;
116
+ }) => {
117
+ owner: "openai";
118
+ baseUrl: "https://api.openai.com";
119
+ token: string;
120
+ };
121
+ declare const custom: ({ baseUrl, token, }: {
122
+ token: string;
123
+ baseUrl: string;
124
+ }) => {
125
+ owner: "custom";
126
+ baseUrl: string;
127
+ token: string;
128
+ };
129
+
102
130
  type ChatCompletionMessage = {
103
131
  role: "system" | "assistant" | "user";
104
132
  content: string;
@@ -164,7 +192,6 @@ type StreamDisabled = {
164
192
  } | object;
165
193
  type StreamParameter = StreamEnabled | StreamDisabled;
166
194
  type OpenAIChatModel = "gpt-4-turbo" | "gpt-4-turbo-2024-04-09" | "gpt-4-0125-preview" | "gpt-4-turbo-preview" | "gpt-4-1106-preview" | "gpt-4-vision-preview" | "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-1106" | "gpt-3.5-turbo-0125" | "gpt-3.5-turbo-16k-0613";
167
- type TogetherAIChatModel = "zero-one-ai/Yi-34B-Chat" | "allenai/OLMo-7B-Instruct" | "allenai/OLMo-7B-Twin-2T" | "allenai/OLMo-7B" | "Austism/chronos-hermes-13b" | "cognitivecomputations/dolphin-2.5-mixtral-8x7b" | "databricks/dbrx-instruct" | "deepseek-ai/deepseek-coder-33b-instruct" | "deepseek-ai/deepseek-llm-67b-chat" | "garage-bAInd/Platypus2-70B-instruct" | "google/gemma-2b-it" | "google/gemma-7b-it" | "Gryphe/MythoMax-L2-13b" | "lmsys/vicuna-13b-v1.5" | "lmsys/vicuna-7b-v1.5" | "codellama/CodeLlama-13b-Instruct-hf" | "codellama/CodeLlama-34b-Instruct-hf" | "codellama/CodeLlama-70b-Instruct-hf" | "codellama/CodeLlama-7b-Instruct-hf" | "meta-llama/Llama-2-70b-chat-hf" | "meta-llama/Llama-2-13b-chat-hf" | "meta-llama/Llama-2-7b-chat-hf" | "meta-llama/Llama-3-8b-chat-hf" | "meta-llama/Llama-3-70b-chat-hf" | "mistralai/Mistral-7B-Instruct-v0.1" | "mistralai/Mistral-7B-Instruct-v0.2" | "mistralai/Mistral-7B-Instruct-v0.3" | "mistralai/Mixtral-8x7B-Instruct-v0.1" | "mistralai/Mixtral-8x22B-Instruct-v0.1" | "NousResearch/Nous-Capybara-7B-V1p9" | "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO" | "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT" | "NousResearch/Nous-Hermes-llama-2-7b" | "NousResearch/Nous-Hermes-Llama2-13b" | "NousResearch/Nous-Hermes-2-Yi-34B" | "openchat/openchat-3.5-1210" | "Open-Orca/Mistral-7B-OpenOrca" | "Qwen/Qwen1.5-0.5B-Chat" | "Qwen/Qwen1.5-1.8B-Chat" | "Qwen/Qwen1.5-4B-Chat" | "Qwen/Qwen1.5-7B-Chat" | "Qwen/Qwen1.5-14B-Chat" | "Qwen/Qwen1.5-32B-Chat" | "Qwen/Qwen1.5-72B-Chat" | "Qwen/Qwen1.5-110B-Chat" | "Qwen/Qwen2-72B-Instruct" | "snorkelai/Snorkel-Mistral-PairRM-DPO" | "Snowflake/snowflake-arctic-instruct" | "togethercomputer/alpaca-7b" | "teknium/OpenHermes-2-Mistral-7B" | "teknium/OpenHermes-2p5-Mistral-7B" | "togethercomputer/Llama-2-7B-32K-Instruct" | "togethercomputer/RedPajama-INCITE-Chat-3B-v1" | "togethercomputer/RedPajama-INCITE-7B-Chat" | "togethercomputer/StripedHyena-Nous-7B" | 
"Undi95/ReMM-SLERP-L2-13B" | "Undi95/Toppy-M-7B" | "WizardLM/WizardLM-13B-V1.2" | "upstage/SOLAR-10.7B-Instruct-v1.0";
168
195
  type ChatRequestCommonFields = {
169
196
  frequency_penalty?: number;
170
197
  logit_bias?: Record<string, number>;
@@ -186,18 +213,14 @@ type PromptChatRequestFields = ChatRequestCommonFields & {
186
213
  type ChatRequestFields = ChatRequestCommonFields & {
187
214
  messages: ChatCompletionMessage[];
188
215
  };
189
- type LlmProvider = "openai" | "togetherai";
190
- type LlmProviderBaseUrl = "https://api.openai.com" | "https://api.together.xyz";
191
216
  type ChatRequestProviders = {
192
- provider: "openai";
217
+ provider: ProviderReturnType;
193
218
  model: OpenAIChatModel;
194
- llmToken: string;
195
219
  } | {
196
- provider: "togetherai";
197
- model: TogetherAIChatModel;
198
- llmToken: string;
220
+ provider: ProviderReturnType;
221
+ model: string;
199
222
  } | {
200
- provider: "upstash";
223
+ provider: ProviderReturnType;
201
224
  model: ChatModel;
202
225
  };
203
226
  type PromptChatRequest<TStream extends StreamParameter> = ChatRequestProviders & PromptChatRequestFields & TStream;
@@ -272,6 +295,15 @@ type Message = {
272
295
  * The url where this message is sent to.
273
296
  */
274
297
  url: string;
298
+ /**
299
+ * The endpoint name of the message if the endpoint is given a
300
+ * name within the url group.
301
+ */
302
+ endpointName?: string;
303
+ /**
304
+ * The api name if this message was sent to an api
305
+ */
306
+ api?: string;
275
307
  /**
276
308
  * The http method used to deliver the message
277
309
  */
@@ -284,6 +316,11 @@ type Message = {
284
316
  * The http body sent to your API
285
317
  */
286
318
  body?: string;
319
+ /**
320
+ * The base64 encoded body if the body contains non-UTF-8 characters,
321
+ * `undefined` otherwise.
322
+ */
323
+ bodyBase64?: string;
287
324
  /**
288
325
  * Maxmimum number of retries.
289
326
  */
@@ -308,6 +345,14 @@ type Message = {
308
345
  * The queue name if this message was sent to a queue.
309
346
  */
310
347
  queueName?: string;
348
+ /**
349
+ * The scheduleId of the message if the message is triggered by a schedule
350
+ */
351
+ scheduleId?: string;
352
+ /**
353
+ * IP address of the publisher of this message
354
+ */
355
+ callerIp?: string;
311
356
  };
312
357
  type MessagePayload = Omit<Message, "urlGroup"> & {
313
358
  topicName: string;
@@ -323,10 +368,75 @@ declare class Messages {
323
368
  * Cancel a message
324
369
  */
325
370
  delete(messageId: string): Promise<void>;
371
+ deleteMany(messageIds: string[]): Promise<number>;
372
+ deleteAll(): Promise<number>;
326
373
  }
327
374
 
328
375
  type DlqMessage = Message & {
376
+ /**
377
+ * The unique id within the DLQ
378
+ */
329
379
  dlqId: string;
380
+ /**
381
+ * The HTTP status code of the last failed delivery attempt
382
+ */
383
+ responseStatus?: number;
384
+ /**
385
+ * The response headers of the last failed delivery attempt
386
+ */
387
+ responseHeader?: Record<string, string[]>;
388
+ /**
389
+ * The response body of the last failed delivery attempt if it is
390
+ * composed of UTF-8 characters only, `undefined` otherwise.
391
+ */
392
+ responseBody?: string;
393
+ /**
394
+ * The base64 encoded response body of the last failed delivery attempt
395
+ * if the response body contains non-UTF-8 characters, `undefined` otherwise.
396
+ */
397
+ responseBodyBase64?: string;
398
+ };
399
+ type DLQFilter = {
400
+ /**
401
+ * Filter DLQ entries by message id
402
+ */
403
+ messageId?: string;
404
+ /**
405
+ * Filter DLQ entries by url
406
+ */
407
+ url?: string;
408
+ /**
409
+ * Filter DLQ entries by url group name
410
+ */
411
+ urlGroup?: string;
412
+ /**
413
+ * Filter DLQ entries by api name
414
+ */
415
+ api?: string;
416
+ /**
417
+ * Filter DLQ entries by queue name
418
+ */
419
+ queueName?: string;
420
+ /**
421
+ * Filter DLQ entries by schedule id
422
+ */
423
+ scheduleId?: string;
424
+ /**
425
+ * Filter DLQ entries by starting time, in milliseconds
426
+ */
427
+ fromDate?: number;
428
+ /**
429
+ * Filter DLQ entries by ending time, in milliseconds
430
+ */
431
+ toDate?: number;
432
+ /**
433
+ * Filter DLQ entries by HTTP status of the response
434
+ */
435
+ responseStatus?: number;
436
+ /**
437
+ * Filter DLQ entries by IP address of the publisher of the message
438
+ */
439
+ callerIp?: string;
330
440
  };
331
441
  declare class DLQ {
332
442
  private readonly http;
@@ -336,6 +446,8 @@ declare class DLQ {
336
446
  */
337
447
  listMessages(options?: {
338
448
  cursor?: string;
449
+ count?: number;
450
+ filter?: DLQFilter;
339
451
  }): Promise<{
340
452
  messages: DlqMessage[];
341
453
  cursor?: string;
@@ -354,6 +466,44 @@ declare class DLQ {
354
466
  }>;
355
467
  }
356
468
 
469
+ declare class Chat {
470
+ private http;
471
+ constructor(http: Requester);
472
+ private static toChatRequest;
473
+ /**
474
+ * Calls the Upstash completions api given a ChatRequest.
475
+ *
476
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
477
+ * if stream is enabled.
478
+ *
479
+ * @param request ChatRequest with messages
480
+ * @returns Chat completion or stream
481
+ */
482
+ create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
483
+ /**
484
+ * Calls the Upstash completions api given a ChatRequest.
485
+ *
486
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
487
+ * if stream is enabled.
488
+ *
489
+ * @param request ChatRequest with messages
490
+ * @returns Chat completion or stream
491
+ */
492
+ private createThirdParty;
493
+ /**
494
+ * Calls the Upstash completions api given a PromptRequest.
495
+ *
496
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
497
+ * if stream is enabled.
498
+ *
499
+ * @param request PromptRequest with system and user messages.
500
+ * Note that system parameter shouldn't be passed in the case of
501
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
502
+ * @returns Chat completion or stream
503
+ */
504
+ prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
505
+ }
506
+
357
507
  type QueueResponse = {
358
508
  createdAt: number;
359
509
  updatedAt: number;
@@ -426,10 +576,12 @@ type Schedule = {
426
576
  method: string;
427
577
  header?: Record<string, string[]>;
428
578
  body?: string;
579
+ bodyBase64?: string;
429
580
  retries: number;
430
581
  delay?: number;
431
582
  callback?: string;
432
583
  failureCallback?: string;
584
+ callerIp?: string;
433
585
  isPaused: true | undefined;
434
586
  };
435
587
  type CreateScheduleRequest = {
@@ -621,44 +773,6 @@ declare class UrlGroups {
621
773
  delete(name: string): Promise<void>;
622
774
  }
623
775
 
624
- declare class Chat {
625
- private http;
626
- constructor(http: Requester);
627
- private static toChatRequest;
628
- /**
629
- * Calls the Upstash completions api given a ChatRequest.
630
- *
631
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
632
- * if stream is enabled.
633
- *
634
- * @param request ChatRequest with messages
635
- * @returns Chat completion or stream
636
- */
637
- create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
638
- /**
639
- * Calls the Upstash completions api given a ChatRequest.
640
- *
641
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
642
- * if stream is enabled.
643
- *
644
- * @param request ChatRequest with messages
645
- * @returns Chat completion or stream
646
- */
647
- private createThirdParty;
648
- /**
649
- * Calls the Upstash completions api given a PromptRequest.
650
- *
651
- * Returns a ChatCompletion or a stream of ChatCompletionChunks
652
- * if stream is enabled.
653
- *
654
- * @param request PromptRequest with system and user messages.
655
- * Note that system parameter shouldn't be passed in the case of
656
- * mistralai/Mistral-7B-Instruct-v0.2 model.
657
- * @returns Chat completion or stream
658
- */
659
- prompt: <TStream extends StreamParameter>(request: PromptChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
660
- }
661
-
662
776
  type ClientConfig = {
663
777
  /**
664
778
  * Url of the qstash api server.
@@ -793,7 +907,6 @@ type PublishRequest<TBody = BodyInit> = {
793
907
  url: string;
794
908
  urlGroup?: never;
795
909
  api?: never;
796
- llmProvider?: never;
797
910
  } | {
798
911
  url?: never;
799
912
  /**
@@ -801,30 +914,16 @@ type PublishRequest<TBody = BodyInit> = {
801
914
  */
802
915
  urlGroup: string;
803
916
  api?: never;
804
- llmProvider?: never;
805
- } | {
806
- url?: never;
807
- urlGroup?: never;
808
- /**
809
- * The api endpoint the request should be sent to.
810
- */
811
- api: "llm";
812
- llmProvider?: never;
813
917
  } | {
814
- /**
815
- * 3rd party provider url such as OpenAI: https://api.openai.com/v1/chat/completions
816
- */
817
918
  url?: string;
818
919
  urlGroup?: never;
819
- api?: never;
820
- /**
821
- * 3rd party provider name such as OpenAI, TogetherAI
822
- */
823
- llmProvider: LlmProvider;
824
920
  /**
825
- * 3rd party provider secret key
921
+ * The api endpoint the request should be sent to.
826
922
  */
827
- llmToken?: string;
923
+ api: {
924
+ name: "llm";
925
+ provider?: ProviderReturnType;
926
+ };
828
927
  });
829
928
  type PublishJsonRequest = Omit<PublishRequest, "body"> & {
830
929
  /**
@@ -842,6 +941,7 @@ type EventsRequestFilter = {
842
941
  state?: State;
843
942
  url?: string;
844
943
  urlGroup?: string;
944
+ api?: string;
845
945
  scheduleId?: string;
846
946
  queueName?: string;
847
947
  fromDate?: number;
@@ -956,4 +1056,4 @@ declare class QstashChatRatelimitError extends QstashError {
956
1056
  constructor(args: ChatRateLimit);
957
1057
  }
958
1058
 
959
- export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventPayload, type EventsRequest, type GetEventsPayload, type GetEventsResponse, type HeadersInit, type LlmProvider, type LlmProviderBaseUrl, type Message, type MessagePayload, Messages, type OpenAIChatModel, type PromptChatRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToUrlGroupsResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type TogetherAIChatModel, type UrlGroup, UrlGroups, type VerifyRequest, type WithCursor };
1059
+ export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventPayload, type EventsRequest, type GetEventsPayload, type GetEventsResponse, type HeadersInit, type Message, type MessagePayload, Messages, type OpenAIChatModel, type PromptChatRequest, type ProviderReturnType, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToUrlGroupsResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type UrlGroup, UrlGroups, type VerifyRequest, type WithCursor, custom, openai, upstash };