@upstash/qstash 2.5.4 → 2.5.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (5)
  1. package/index.d.mts +159 -7
  2. package/index.d.ts +159 -7
  3. package/index.js +160 -25
  4. package/index.mjs +155 -20
  5. package/package.json +1 -1
package/index.d.mts CHANGED
@@ -75,6 +75,107 @@ type WithCursor<T> = T & {
  };
  type BodyInit = Blob | FormData | URLSearchParams | ReadableStream<Uint8Array> | string;
  type HeadersInit = Headers | Record<string, string> | [string, string][] | IterableIterator<[string, string]>;
+ type RequestOptions = RequestInit & {
+ backend?: string;
+ };
+ type ChatRateLimit = {
+ "limit-requests": string | null;
+ "limit-tokens": string | null;
+ "remaining-requests": string | null;
+ "remaining-tokens": string | null;
+ "reset-requests": string | null;
+ "reset-tokens": string | null;
+ };
+ type RateLimit = {
+ limit: string | null;
+ remaining: string | null;
+ reset: string | null;
+ };
+
+ type ChatCompletionMessage = {
+ role: "system" | "assistant" | "user";
+ content: string;
+ };
+ type ChatModel = "meta-llama/Meta-Llama-3-8B-Instruct" | "mistralai/Mistral-7B-Instruct-v0.2";
+ type ChatResponseFormat = {
+ type: "text" | "json_object";
+ };
+ type TopLogprob = {
+ token: string;
+ bytes: number[];
+ logprob: number;
+ };
+ type ChatCompletionTokenLogprob = {
+ token: string;
+ bytes: number[];
+ logprob: number;
+ top_logprobs: TopLogprob[];
+ };
+ type ChoiceLogprobs = {
+ content: ChatCompletionTokenLogprob[];
+ };
+ type Choice = {
+ finish_reason: "stop" | "length";
+ index: number;
+ logprobs: ChoiceLogprobs;
+ message: ChatCompletionMessage;
+ };
+ type CompletionUsage = {
+ completion_tokens: number;
+ prompt_tokens: number;
+ total_tokens: number;
+ };
+ type ChatCompletion = {
+ id: string;
+ choices: Choice[];
+ created: number;
+ model: string;
+ object: "chat.completion";
+ system_fingerprint: string;
+ usage: CompletionUsage;
+ };
+ type ChunkChoice = {
+ delta: ChatCompletionMessage;
+ finish_reason: "stop" | "length";
+ index: number;
+ logprobs: ChoiceLogprobs;
+ };
+ type ChatCompletionChunk = {
+ id: string;
+ choices: ChunkChoice[];
+ created: number;
+ model: string;
+ object: "chat.completion.chunk";
+ system_fingerprint: string;
+ usage: CompletionUsage;
+ };
+ type StreamEnabled = {
+ stream: true;
+ };
+ type StreamDisabled = {
+ stream: false;
+ } | object;
+ type StreamParameter = StreamEnabled | StreamDisabled;
+ type ChatRequest<TStream extends StreamParameter> = {
+ messages: ChatCompletionMessage[];
+ model: ChatModel;
+ frequency_penalty?: number;
+ logit_bias?: Record<string, number>;
+ logprobs?: boolean;
+ top_logprobs?: number;
+ max_tokens?: number;
+ n?: number;
+ presence_penalty?: number;
+ response_format?: ChatResponseFormat;
+ seed?: number;
+ stop?: string | string[];
+ temperature?: number;
+ top_p?: number;
+ } & TStream;
+ type PromptRequest<TStream extends StreamParameter> = Omit<ChatRequest<TStream>, "messages" | "stream"> & {
+ system: string;
+ user: string;
+ } & TStream;

  type UpstashRequest = {
  /**
@@ -111,6 +212,7 @@ type UpstashResponse<TResult> = TResult & {
  };
  type Requester = {
  request: <TResult = unknown>(request: UpstashRequest) => Promise<UpstashResponse<TResult>>;
+ requestStream: (request: UpstashRequest) => AsyncIterable<ChatCompletionChunk>;
  };
  type RetryConfig = false | {
  /**
@@ -259,7 +361,7 @@ declare class Queue {
  /**
  * Enqueue a message to a queue, serializing the body to JSON.
  */
- enqueueJSON<TBody = unknown>(request: PublishRequest<TBody>): Promise<PublishResponse<PublishRequest<TBody>>>;
+ enqueueJSON<TBody = unknown, TRequest extends PublishRequest<TBody> = PublishRequest<TBody>>(request: TRequest): Promise<PublishResponse<TRequest>>;
  }

  type Schedule = {
@@ -438,6 +540,34 @@ declare class Topics {
  delete(name: string): Promise<void>;
  }

+ declare class Chat {
+ private http;
+ constructor(http: Requester);
+ private static toChatRequest;
+ /**
+ * Calls the Upstash completions api given a ChatRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request ChatRequest with messages
+ * @returns Chat completion or stream
+ */
+ create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+ /**
+ * Calls the Upstash completions api given a PromptRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request PromptRequest with system and user messages.
+ * Note that system parameter shouldn't be passed in the case of
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
+ * @returns Chat completion or stream
+ */
+ prompt: <TStream extends StreamParameter>(request: PromptRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+ }
+
  type ClientConfig = {
  /**
  * Url of the qstash api server.
@@ -560,12 +690,21 @@ type PublishRequest<TBody = BodyInit> = {
  */
  url: string;
  topic?: never;
+ api?: never;
  } | {
  url?: never;
  /**
- * The url where the message should be sent to.
+ * The topic the message should be sent to.
  */
  topic: string;
+ api?: never;
+ } | {
+ url?: never;
+ topic?: never;
+ /**
+ * The api endpoint the request should be sent to.
+ */
+ api: "llm";
  });
  type PublishJsonRequest = Omit<PublishRequest, "body"> & {
  /**
@@ -629,6 +768,12 @@ declare class Client {
  * Create, read, update or delete queues.
  */
  queue(request?: QueueRequest): Queue;
+ /**
+ * Access the Chat API
+ *
+ * Call the create or prompt methods
+ */
+ chat(): Chat;
  publish<TRequest extends PublishRequest>(request: TRequest): Promise<PublishResponse<TRequest>>;
  /**
  * publishJSON is a utility wrapper around `publish` that automatically serializes the body
@@ -664,15 +809,19 @@ declare class Client {
  */
  events(request?: EventsRequest): Promise<GetEventsResponse>;
  }
- type PublishToUrlResponse = {
+ type PublishToApiResponse = {
  messageId: string;
+ };
+ type PublishToUrlResponse = PublishToApiResponse & {
  url: string;
  deduplicated?: boolean;
  };
  type PublishToTopicResponse = PublishToUrlResponse[];
- type PublishResponse<R> = R extends {
+ type PublishResponse<TRequest> = TRequest extends {
  url: string;
- } ? PublishToUrlResponse : PublishToTopicResponse;
+ } ? PublishToUrlResponse : TRequest extends {
+ topic: string;
+ } ? PublishToTopicResponse : PublishToApiResponse;

  /**
  * Result of 500 Internal Server Error
@@ -681,7 +830,10 @@ declare class QstashError extends Error {
  constructor(message: string);
  }
  declare class QstashRatelimitError extends QstashError {
- constructor(args: unknown);
+ constructor(args: RateLimit);
+ }
+ declare class QstashChatRatelimitError extends QstashError {
+ constructor(args: ChatRateLimit);
  }

- export { type AddEndpointsRequest, type BodyInit, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventsRequest, type GetEventsResponse, type HeadersInit, type Message, Messages, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToTopicResponse, type PublishToUrlResponse, QstashError, QstashRatelimitError, type QueueRequest, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type Schedule, Schedules, SignatureError, type State, type Topic, Topics, type VerifyRequest, type WithCursor };
+ export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventsRequest, type GetEventsResponse, type HeadersInit, type Message, Messages, type PromptRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToTopicResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type Topic, Topics, type VerifyRequest, type WithCursor };
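
The declarations above add a typed Chat client next to the existing publish and queue APIs. A minimal usage sketch, assuming a token-based Client construction (unchanged from 2.5.4) and illustrative prompt text; the other names come from the declarations in this file:

    import { Client } from "@upstash/qstash";

    // Assumes QSTASH_TOKEN is set in the environment.
    const client = new Client({ token: process.env.QSTASH_TOKEN! });

    // Without `stream`, create() resolves to a ChatCompletion.
    const completion = await client.chat().create({
      model: "meta-llama/Meta-Llama-3-8B-Instruct",
      messages: [{ role: "user", content: "Summarize QStash in one sentence." }],
    });
    console.log(completion.choices[0].message.content);

    // With `stream: true`, the conditional return type narrows to AsyncIterable<ChatCompletionChunk>.
    const stream = await client.chat().create({
      model: "meta-llama/Meta-Llama-3-8B-Instruct",
      messages: [{ role: "user", content: "Write a haiku about queues." }],
      stream: true,
    });
    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0].delta.content);
    }
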
package/index.d.ts CHANGED
@@ -75,6 +75,107 @@ type WithCursor<T> = T & {
  };
  type BodyInit = Blob | FormData | URLSearchParams | ReadableStream<Uint8Array> | string;
  type HeadersInit = Headers | Record<string, string> | [string, string][] | IterableIterator<[string, string]>;
+ type RequestOptions = RequestInit & {
+ backend?: string;
+ };
+ type ChatRateLimit = {
+ "limit-requests": string | null;
+ "limit-tokens": string | null;
+ "remaining-requests": string | null;
+ "remaining-tokens": string | null;
+ "reset-requests": string | null;
+ "reset-tokens": string | null;
+ };
+ type RateLimit = {
+ limit: string | null;
+ remaining: string | null;
+ reset: string | null;
+ };
+
+ type ChatCompletionMessage = {
+ role: "system" | "assistant" | "user";
+ content: string;
+ };
+ type ChatModel = "meta-llama/Meta-Llama-3-8B-Instruct" | "mistralai/Mistral-7B-Instruct-v0.2";
+ type ChatResponseFormat = {
+ type: "text" | "json_object";
+ };
+ type TopLogprob = {
+ token: string;
+ bytes: number[];
+ logprob: number;
+ };
+ type ChatCompletionTokenLogprob = {
+ token: string;
+ bytes: number[];
+ logprob: number;
+ top_logprobs: TopLogprob[];
+ };
+ type ChoiceLogprobs = {
+ content: ChatCompletionTokenLogprob[];
+ };
+ type Choice = {
+ finish_reason: "stop" | "length";
+ index: number;
+ logprobs: ChoiceLogprobs;
+ message: ChatCompletionMessage;
+ };
+ type CompletionUsage = {
+ completion_tokens: number;
+ prompt_tokens: number;
+ total_tokens: number;
+ };
+ type ChatCompletion = {
+ id: string;
+ choices: Choice[];
+ created: number;
+ model: string;
+ object: "chat.completion";
+ system_fingerprint: string;
+ usage: CompletionUsage;
+ };
+ type ChunkChoice = {
+ delta: ChatCompletionMessage;
+ finish_reason: "stop" | "length";
+ index: number;
+ logprobs: ChoiceLogprobs;
+ };
+ type ChatCompletionChunk = {
+ id: string;
+ choices: ChunkChoice[];
+ created: number;
+ model: string;
+ object: "chat.completion.chunk";
+ system_fingerprint: string;
+ usage: CompletionUsage;
+ };
+ type StreamEnabled = {
+ stream: true;
+ };
+ type StreamDisabled = {
+ stream: false;
+ } | object;
+ type StreamParameter = StreamEnabled | StreamDisabled;
+ type ChatRequest<TStream extends StreamParameter> = {
+ messages: ChatCompletionMessage[];
+ model: ChatModel;
+ frequency_penalty?: number;
+ logit_bias?: Record<string, number>;
+ logprobs?: boolean;
+ top_logprobs?: number;
+ max_tokens?: number;
+ n?: number;
+ presence_penalty?: number;
+ response_format?: ChatResponseFormat;
+ seed?: number;
+ stop?: string | string[];
+ temperature?: number;
+ top_p?: number;
+ } & TStream;
+ type PromptRequest<TStream extends StreamParameter> = Omit<ChatRequest<TStream>, "messages" | "stream"> & {
+ system: string;
+ user: string;
+ } & TStream;

  type UpstashRequest = {
  /**
@@ -111,6 +212,7 @@ type UpstashResponse<TResult> = TResult & {
  };
  type Requester = {
  request: <TResult = unknown>(request: UpstashRequest) => Promise<UpstashResponse<TResult>>;
+ requestStream: (request: UpstashRequest) => AsyncIterable<ChatCompletionChunk>;
  };
  type RetryConfig = false | {
  /**
@@ -259,7 +361,7 @@ declare class Queue {
  /**
  * Enqueue a message to a queue, serializing the body to JSON.
  */
- enqueueJSON<TBody = unknown>(request: PublishRequest<TBody>): Promise<PublishResponse<PublishRequest<TBody>>>;
+ enqueueJSON<TBody = unknown, TRequest extends PublishRequest<TBody> = PublishRequest<TBody>>(request: TRequest): Promise<PublishResponse<TRequest>>;
  }

  type Schedule = {
@@ -438,6 +540,34 @@ declare class Topics {
  delete(name: string): Promise<void>;
  }

+ declare class Chat {
+ private http;
+ constructor(http: Requester);
+ private static toChatRequest;
+ /**
+ * Calls the Upstash completions api given a ChatRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request ChatRequest with messages
+ * @returns Chat completion or stream
+ */
+ create: <TStream extends StreamParameter>(request: ChatRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+ /**
+ * Calls the Upstash completions api given a PromptRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request PromptRequest with system and user messages.
+ * Note that system parameter shouldn't be passed in the case of
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
+ * @returns Chat completion or stream
+ */
+ prompt: <TStream extends StreamParameter>(request: PromptRequest<TStream>) => Promise<TStream extends StreamEnabled ? AsyncIterable<ChatCompletionChunk> : ChatCompletion>;
+ }
+
  type ClientConfig = {
  /**
  * Url of the qstash api server.
@@ -560,12 +690,21 @@ type PublishRequest<TBody = BodyInit> = {
  */
  url: string;
  topic?: never;
+ api?: never;
  } | {
  url?: never;
  /**
- * The url where the message should be sent to.
+ * The topic the message should be sent to.
  */
  topic: string;
+ api?: never;
+ } | {
+ url?: never;
+ topic?: never;
+ /**
+ * The api endpoint the request should be sent to.
+ */
+ api: "llm";
  });
  type PublishJsonRequest = Omit<PublishRequest, "body"> & {
  /**
@@ -629,6 +768,12 @@ declare class Client {
  * Create, read, update or delete queues.
  */
  queue(request?: QueueRequest): Queue;
+ /**
+ * Access the Chat API
+ *
+ * Call the create or prompt methods
+ */
+ chat(): Chat;
  publish<TRequest extends PublishRequest>(request: TRequest): Promise<PublishResponse<TRequest>>;
  /**
  * publishJSON is a utility wrapper around `publish` that automatically serializes the body
@@ -664,15 +809,19 @@ declare class Client {
  */
  events(request?: EventsRequest): Promise<GetEventsResponse>;
  }
- type PublishToUrlResponse = {
+ type PublishToApiResponse = {
  messageId: string;
+ };
+ type PublishToUrlResponse = PublishToApiResponse & {
  url: string;
  deduplicated?: boolean;
  };
  type PublishToTopicResponse = PublishToUrlResponse[];
- type PublishResponse<R> = R extends {
+ type PublishResponse<TRequest> = TRequest extends {
  url: string;
- } ? PublishToUrlResponse : PublishToTopicResponse;
+ } ? PublishToUrlResponse : TRequest extends {
+ topic: string;
+ } ? PublishToTopicResponse : PublishToApiResponse;

  /**
  * Result of 500 Internal Server Error
@@ -681,7 +830,10 @@ declare class QstashError extends Error {
  constructor(message: string);
  }
  declare class QstashRatelimitError extends QstashError {
- constructor(args: unknown);
+ constructor(args: RateLimit);
+ }
+ declare class QstashChatRatelimitError extends QstashError {
+ constructor(args: ChatRateLimit);
  }

- export { type AddEndpointsRequest, type BodyInit, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventsRequest, type GetEventsResponse, type HeadersInit, type Message, Messages, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToTopicResponse, type PublishToUrlResponse, QstashError, QstashRatelimitError, type QueueRequest, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type Schedule, Schedules, SignatureError, type State, type Topic, Topics, type VerifyRequest, type WithCursor };
+ export { type AddEndpointsRequest, type BodyInit, Chat, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionMessage, type ChatRateLimit, type ChatRequest, Client, type CreateScheduleRequest, type Endpoint, type Event, type EventsRequest, type GetEventsResponse, type HeadersInit, type Message, Messages, type PromptRequest, type PublishBatchRequest, type PublishJsonRequest, type PublishRequest, type PublishResponse, type PublishToApiResponse, type PublishToTopicResponse, type PublishToUrlResponse, QstashChatRatelimitError, QstashError, QstashRatelimitError, type QueueRequest, type RateLimit, Receiver, type ReceiverConfig, type RemoveEndpointsRequest, type RequestOptions, type Schedule, Schedules, SignatureError, type State, type StreamDisabled, type StreamEnabled, type StreamParameter, type Topic, Topics, type VerifyRequest, type WithCursor };
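
The same declarations land in the CommonJS typings. Of note for publishers: the destination union now accepts api: "llm" next to url and topic, and PublishResponse narrows to PublishToApiResponse ({ messageId }) for that case. A rough sketch of what that looks like; the callback URL is a made-up example, and callback itself is assumed from the pre-existing PublishRequest options rather than from this hunk:

    import { Client } from "@upstash/qstash";

    const client = new Client({ token: process.env.QSTASH_TOKEN! });

    // Publishing to the LLM api endpoint; the result is typed as PublishToApiResponse.
    const res = await client.publishJSON({
      api: "llm",
      body: {
        model: "meta-llama/Meta-Llama-3-8B-Instruct",
        messages: [{ role: "user", content: "Hello from QStash" }],
      },
      // Hypothetical endpoint that receives the completion asynchronously.
      callback: "https://example.com/qstash/llm-callback",
    });
    console.log(res.messageId); // url/deduplicated are not present for api publishes
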
package/index.js CHANGED
@@ -1,4 +1,4 @@
- "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; }
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class; var _class2;


  var _chunkUUR7N6E6js = require('./chunk-UUR7N6E6.js');
@@ -55,14 +55,19 @@ var QstashRatelimitError = class extends QstashError {
  super(`You have been ratelimited. ${JSON.stringify(args)} `);
  }
  };
+ var QstashChatRatelimitError = class extends QstashError {
+ constructor(args) {
+ super(`You have been ratelimited. ${JSON.stringify(args)} `);
+ }
+ };

  // src/client/http.ts
- var HttpClient = class {
+ var HttpClient = (_class = class {




- constructor(config) {
+ constructor(config) {;_class.prototype.__init.call(this);_class.prototype.__init2.call(this);
  this.baseUrl = config.baseUrl.replace(/\/$/, "");
  this.authorization = config.authorization;
  this.retry = // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
@@ -75,22 +80,44 @@ var HttpClient = class {
  };
  }
  async request(request) {
- const headers = new Headers(request.headers);
- headers.set("Authorization", this.authorization);
- const requestOptions = {
- method: request.method,
- headers,
- body: request.body,
- keepalive: request.keepalive
- };
- const url = new URL([this.baseUrl, ...request.path].join("/"));
- if (request.query) {
- for (const [key, value] of Object.entries(request.query)) {
- if (value !== void 0) {
- url.searchParams.set(key, value.toString());
+ const { response } = await this.requestWithBackoff(request);
+ if (request.parseResponseAsJson === false) {
+ return void 0;
+ }
+ return await response.json();
+ }
+ async *requestStream(request) {
+ const { response } = await this.requestWithBackoff(request);
+ if (!response.body) {
+ throw new Error("No response body");
+ }
+ const body = response.body;
+ const reader = body.getReader();
+ const decoder = new TextDecoder();
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ const chunkText = decoder.decode(value, { stream: true });
+ const chunks = chunkText.split("\n").filter(Boolean);
+ for (const chunk of chunks) {
+ if (chunk.startsWith("data: ")) {
+ const data = chunk.slice(6);
+ if (data === "[DONE]") {
+ break;
+ }
+ yield JSON.parse(data);
+ }
  }
  }
+ } finally {
+ await reader.cancel();
  }
+ }
+ __init() {this.requestWithBackoff = async (request) => {
+ const [url, requestOptions] = this.processRequest(request);
  let response = void 0;
  let error = void 0;
  for (let index = 0; index < this.retry.attempts; index++) {
@@ -105,7 +132,43 @@ var HttpClient = class {
  if (!response) {
  throw _nullishCoalesce(error, () => ( new Error("Exhausted all retries")));
  }
+ await this.checkResponse(response);
+ return {
+ response,
+ error
+ };
+ }}
+ __init2() {this.processRequest = (request) => {
+ const headers = new Headers(request.headers);
+ headers.set("Authorization", this.authorization);
+ const requestOptions = {
+ method: request.method,
+ headers,
+ body: request.body,
+ keepalive: request.keepalive
+ };
+ const url = new URL([this.baseUrl, ...request.path].join("/"));
+ if (request.query) {
+ for (const [key, value] of Object.entries(request.query)) {
+ if (value !== void 0) {
+ url.searchParams.set(key, value.toString());
+ }
+ }
+ }
+ return [url.toString(), requestOptions];
+ }}
+ async checkResponse(response) {
  if (response.status === 429) {
+ if (response.headers.get("x-ratelimit-limit-requests")) {
+ throw new QstashChatRatelimitError({
+ "limit-requests": response.headers.get("x-ratelimit-limit-requests"),
+ "limit-tokens": response.headers.get("x-ratelimit-limit-tokens"),
+ "remaining-requests": response.headers.get("x-ratelimit-remaining-requests"),
+ "remaining-tokens": response.headers.get("x-ratelimit-remaining-tokens"),
+ "reset-requests": response.headers.get("x-ratelimit-reset-requests"),
+ "reset-tokens": response.headers.get("x-ratelimit-reset-tokens")
+ });
+ }
  throw new QstashRatelimitError({
  limit: response.headers.get("Burst-RateLimit-Limit"),
  remaining: response.headers.get("Burst-RateLimit-Remaining"),
@@ -116,12 +179,8 @@ var HttpClient = class {
  const body = await response.text();
  throw new QstashError(body.length > 0 ? body : `Error: status=${response.status}`);
  }
- if (request.parseResponseAsJson === false) {
- return void 0;
- }
- return await response.json();
  }
- };
+ }, _class);

  // src/client/messages.ts
  var Messages = class {
@@ -192,6 +251,9 @@ function processHeaders(request) {
  }
  return headers;
  }
+ function getRequestPath(request) {
+ return _nullishCoalesce(_nullishCoalesce(request.url, () => ( request.topic)), () => ( `api/${request.api}`));
+ }

  // src/client/queue.ts
  var Queue = class {
@@ -264,7 +326,7 @@ var Queue = class {
  throw new Error("Please provide a queue name to the Queue constructor");
  }
  const headers = processHeaders(request);
- const destination = _nullishCoalesce(request.url, () => ( request.topic));
+ const destination = getRequestPath(request);
  const response = await this.http.request({
  path: ["v2", "enqueue", this.queueName, destination],
  body: request.body,
@@ -415,6 +477,69 @@ var Topics = class {
  }
  };

+ // src/client/llm/chat.ts
+ var Chat = (_class2 = class _Chat {
+
+ constructor(http) {;_class2.prototype.__init3.call(this);_class2.prototype.__init4.call(this);
+ this.http = http;
+ }
+ static toChatRequest(request) {
+ const messages = [];
+ messages.push(
+ { role: "system", content: request.system },
+ { role: "user", content: request.user }
+ );
+ const chatRequest = { ...request, messages };
+ return chatRequest;
+ }
+ /**
+ * Calls the Upstash completions api given a ChatRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request ChatRequest with messages
+ * @returns Chat completion or stream
+ */
+ __init3() {this.create = async (request) => {
+ const body = JSON.stringify(request);
+ if ("stream" in request && request.stream) {
+ return this.http.requestStream({
+ path: ["llm", "v1", "chat", "completions"],
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Connection: "keep-alive",
+ Accept: "text/event-stream",
+ "Cache-Control": "no-cache"
+ },
+ body
+ });
+ }
+ return this.http.request({
+ path: ["llm", "v1", "chat", "completions"],
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body
+ });
+ }}
+ /**
+ * Calls the Upstash completions api given a PromptRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request PromptRequest with system and user messages.
+ * Note that system parameter shouldn't be passed in the case of
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
+ * @returns Chat completion or stream
+ */
+ __init4() {this.prompt = async (request) => {
+ const chatRequest = _Chat.toChatRequest(request);
+ return this.create(chatRequest);
+ }}
+ }, _class2);
+
  // src/client/client.ts
  var Client = class {

@@ -465,10 +590,18 @@ var Client = class {
  queue(request) {
  return new Queue(this.http, _optionalChain([request, 'optionalAccess', _6 => _6.queueName]));
  }
+ /**
+ * Access the Chat API
+ *
+ * Call the create or prompt methods
+ */
+ chat() {
+ return new Chat(this.http);
+ }
  async publish(request) {
  const headers = processHeaders(request);
  const response = await this.http.request({
- path: ["v2", "publish", _nullishCoalesce(request.url, () => ( request.topic))],
+ path: ["v2", "publish", getRequestPath(request)],
  body: request.body,
  headers,
  method: "POST"
@@ -498,7 +631,7 @@ var Client = class {
  const headers = processHeaders(message);
  const headerEntries = Object.fromEntries(headers.entries());
  messages.push({
- destination: _nullishCoalesce(message.url, () => ( message.topic)),
+ destination: getRequestPath(message),
  headers: headerEntries,
  body: message.body,
  ...message.queueName && { queue: message.queueName }
@@ -577,4 +710,6 @@ var Client = class {



- exports.Client = Client; exports.Messages = Messages; exports.QstashError = QstashError; exports.QstashRatelimitError = QstashRatelimitError; exports.Receiver = _chunkUUR7N6E6js.Receiver; exports.Schedules = Schedules; exports.SignatureError = _chunkUUR7N6E6js.SignatureError; exports.Topics = Topics;
+
+
+ exports.Chat = Chat; exports.Client = Client; exports.Messages = Messages; exports.QstashChatRatelimitError = QstashChatRatelimitError; exports.QstashError = QstashError; exports.QstashRatelimitError = QstashRatelimitError; exports.Receiver = _chunkUUR7N6E6js.Receiver; exports.Schedules = Schedules; exports.SignatureError = _chunkUUR7N6E6js.SignatureError; exports.Topics = Topics;
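
The requestStream generator added above consumes the completions endpoint as a server-sent-events stream: it splits each decoded chunk on newlines, keeps only "data: " lines, stops at "[DONE]", and JSON-parses the rest. A standalone sketch of that same parsing loop, for reference; it mirrors the added code and introduces no behavior beyond it:

    // Parse an SSE response body into parsed JSON events, the way requestStream does.
    async function* parseSseBody(response: Response): AsyncIterable<unknown> {
      if (!response.body) throw new Error("No response body");
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          const lines = decoder.decode(value, { stream: true }).split("\n").filter(Boolean);
          for (const line of lines) {
            if (!line.startsWith("data: ")) continue;
            const data = line.slice(6); // drop the "data: " prefix
            if (data === "[DONE]") return; // server signals end of stream
            yield JSON.parse(data);
          }
        }
      } finally {
        await reader.cancel();
      }
    }
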
package/index.mjs CHANGED
@@ -55,6 +55,11 @@ var QstashRatelimitError = class extends QstashError {
  super(`You have been ratelimited. ${JSON.stringify(args)} `);
  }
  };
+ var QstashChatRatelimitError = class extends QstashError {
+ constructor(args) {
+ super(`You have been ratelimited. ${JSON.stringify(args)} `);
+ }
+ };

  // src/client/http.ts
  var HttpClient = class {
@@ -75,22 +80,44 @@ var HttpClient = class {
  };
  }
  async request(request) {
- const headers = new Headers(request.headers);
- headers.set("Authorization", this.authorization);
- const requestOptions = {
- method: request.method,
- headers,
- body: request.body,
- keepalive: request.keepalive
- };
- const url = new URL([this.baseUrl, ...request.path].join("/"));
- if (request.query) {
- for (const [key, value] of Object.entries(request.query)) {
- if (value !== void 0) {
- url.searchParams.set(key, value.toString());
+ const { response } = await this.requestWithBackoff(request);
+ if (request.parseResponseAsJson === false) {
+ return void 0;
+ }
+ return await response.json();
+ }
+ async *requestStream(request) {
+ const { response } = await this.requestWithBackoff(request);
+ if (!response.body) {
+ throw new Error("No response body");
+ }
+ const body = response.body;
+ const reader = body.getReader();
+ const decoder = new TextDecoder();
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ break;
+ }
+ const chunkText = decoder.decode(value, { stream: true });
+ const chunks = chunkText.split("\n").filter(Boolean);
+ for (const chunk of chunks) {
+ if (chunk.startsWith("data: ")) {
+ const data = chunk.slice(6);
+ if (data === "[DONE]") {
+ break;
+ }
+ yield JSON.parse(data);
+ }
  }
  }
+ } finally {
+ await reader.cancel();
  }
+ }
+ requestWithBackoff = async (request) => {
+ const [url, requestOptions] = this.processRequest(request);
  let response = void 0;
  let error = void 0;
  for (let index = 0; index < this.retry.attempts; index++) {
@@ -105,7 +132,43 @@ var HttpClient = class {
  if (!response) {
  throw error ?? new Error("Exhausted all retries");
  }
+ await this.checkResponse(response);
+ return {
+ response,
+ error
+ };
+ };
+ processRequest = (request) => {
+ const headers = new Headers(request.headers);
+ headers.set("Authorization", this.authorization);
+ const requestOptions = {
+ method: request.method,
+ headers,
+ body: request.body,
+ keepalive: request.keepalive
+ };
+ const url = new URL([this.baseUrl, ...request.path].join("/"));
+ if (request.query) {
+ for (const [key, value] of Object.entries(request.query)) {
+ if (value !== void 0) {
+ url.searchParams.set(key, value.toString());
+ }
+ }
+ }
+ return [url.toString(), requestOptions];
+ };
+ async checkResponse(response) {
  if (response.status === 429) {
+ if (response.headers.get("x-ratelimit-limit-requests")) {
+ throw new QstashChatRatelimitError({
+ "limit-requests": response.headers.get("x-ratelimit-limit-requests"),
+ "limit-tokens": response.headers.get("x-ratelimit-limit-tokens"),
+ "remaining-requests": response.headers.get("x-ratelimit-remaining-requests"),
+ "remaining-tokens": response.headers.get("x-ratelimit-remaining-tokens"),
+ "reset-requests": response.headers.get("x-ratelimit-reset-requests"),
+ "reset-tokens": response.headers.get("x-ratelimit-reset-tokens")
+ });
+ }
  throw new QstashRatelimitError({
  limit: response.headers.get("Burst-RateLimit-Limit"),
  remaining: response.headers.get("Burst-RateLimit-Remaining"),
@@ -116,10 +179,6 @@ var HttpClient = class {
  const body = await response.text();
  throw new QstashError(body.length > 0 ? body : `Error: status=${response.status}`);
  }
- if (request.parseResponseAsJson === false) {
- return void 0;
- }
- return await response.json();
  }
  };

@@ -192,6 +251,9 @@ function processHeaders(request) {
  }
  return headers;
  }
+ function getRequestPath(request) {
+ return request.url ?? request.topic ?? `api/${request.api}`;
+ }

  // src/client/queue.ts
  var Queue = class {
@@ -264,7 +326,7 @@ var Queue = class {
  throw new Error("Please provide a queue name to the Queue constructor");
  }
  const headers = processHeaders(request);
- const destination = request.url ?? request.topic;
+ const destination = getRequestPath(request);
  const response = await this.http.request({
  path: ["v2", "enqueue", this.queueName, destination],
  body: request.body,
@@ -415,6 +477,69 @@ var Topics = class {
  }
  };

+ // src/client/llm/chat.ts
+ var Chat = class _Chat {
+ http;
+ constructor(http) {
+ this.http = http;
+ }
+ static toChatRequest(request) {
+ const messages = [];
+ messages.push(
+ { role: "system", content: request.system },
+ { role: "user", content: request.user }
+ );
+ const chatRequest = { ...request, messages };
+ return chatRequest;
+ }
+ /**
+ * Calls the Upstash completions api given a ChatRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request ChatRequest with messages
+ * @returns Chat completion or stream
+ */
+ create = async (request) => {
+ const body = JSON.stringify(request);
+ if ("stream" in request && request.stream) {
+ return this.http.requestStream({
+ path: ["llm", "v1", "chat", "completions"],
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Connection: "keep-alive",
+ Accept: "text/event-stream",
+ "Cache-Control": "no-cache"
+ },
+ body
+ });
+ }
+ return this.http.request({
+ path: ["llm", "v1", "chat", "completions"],
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body
+ });
+ };
+ /**
+ * Calls the Upstash completions api given a PromptRequest.
+ *
+ * Returns a ChatCompletion or a stream of ChatCompletionChunks
+ * if stream is enabled.
+ *
+ * @param request PromptRequest with system and user messages.
+ * Note that system parameter shouldn't be passed in the case of
+ * mistralai/Mistral-7B-Instruct-v0.2 model.
+ * @returns Chat completion or stream
+ */
+ prompt = async (request) => {
+ const chatRequest = _Chat.toChatRequest(request);
+ return this.create(chatRequest);
+ };
+ };
+
  // src/client/client.ts
  var Client = class {
  http;
@@ -465,10 +590,18 @@ var Client = class {
  queue(request) {
  return new Queue(this.http, request?.queueName);
  }
+ /**
+ * Access the Chat API
+ *
+ * Call the create or prompt methods
+ */
+ chat() {
+ return new Chat(this.http);
+ }
  async publish(request) {
  const headers = processHeaders(request);
  const response = await this.http.request({
- path: ["v2", "publish", request.url ?? request.topic],
+ path: ["v2", "publish", getRequestPath(request)],
  body: request.body,
  headers,
  method: "POST"
@@ -498,7 +631,7 @@ var Client = class {
  const headers = processHeaders(message);
  const headerEntries = Object.fromEntries(headers.entries());
  messages.push({
- destination: message.url ?? message.topic,
+ destination: getRequestPath(message),
  headers: headerEntries,
  body: message.body,
  ...message.queueName && { queue: message.queueName }
@@ -569,8 +702,10 @@ var Client = class {
  }
  };
  export {
+ Chat,
  Client,
  Messages,
+ QstashChatRatelimitError,
  QstashError,
  QstashRatelimitError,
  Receiver,
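
The ESM build mirrors index.js, including the prompt() convenience wrapper (toChatRequest turns system/user into a messages array) and the new chat-specific 429 error. A short usage sketch; the client construction and prompt text are illustrative, while the model name, methods, and error class come from this diff:

    import { Client, QstashChatRatelimitError } from "@upstash/qstash";

    const client = new Client({ token: process.env.QSTASH_TOKEN! });

    try {
      // prompt() builds the messages array from system/user and delegates to create().
      const completion = await client.chat().prompt({
        model: "meta-llama/Meta-Llama-3-8B-Instruct",
        system: "You are a terse assistant.",
        user: "What does QStash do?",
      });
      console.log(completion.choices[0].message.content);
    } catch (error) {
      // 429 responses that carry x-ratelimit-* headers now surface as QstashChatRatelimitError.
      if (error instanceof QstashChatRatelimitError) {
        console.error("Chat API rate limited:", error.message);
      } else {
        throw error;
      }
    }
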
package/package.json CHANGED
@@ -1 +1 @@
- {"version":"v2.5.4","name":"@upstash/qstash","description":"Official Typescript client for QStash","author":"Andreas Thomas <dev@chronark.com>","license":"MIT","homepage":"https://github.com/upstash/sdk-qstash-ts#readme","repository":{"type":"git","url":"git+https://github.com/upstash/sdk-qstash-ts.git"},"bugs":{"url":"https://github.com/upstash/sdk-qstash-ts/issues"},"main":"./index.js","module":"./index.mjs","types":"./index.d.ts","files":["./**"],"exports":{".":{"import":"./index.mjs","require":"./index.js"},"./nextjs":{"import":"./nextjs.mjs","require":"./nextjs.js"},"./dist/nextjs":{"import":"./nextjs.mjs","require":"./nextjs.js"}},"typesVersions":{"*":{"nextjs":["./nextjs.d.ts"]}},"keywords":["qstash","queue","events","serverless","upstash"],"scripts":{"build":"tsup && cp README.md ./dist/ && cp package.json ./dist/ && cp LICENSE ./dist/","test":"bun test","fmt":"prettier --write .","lint":"tsc && eslint \"src/**/*.{js,ts,tsx}\" --quiet --fix"},"devDependencies":{"@commitlint/cli":"^19.2.2","@commitlint/config-conventional":"^19.2.2","@types/bun":"^1.1.1","@types/crypto-js":"^4.2.0","@typescript-eslint/eslint-plugin":"^7.0.1","@typescript-eslint/parser":"^7.0.1","bun-types":"^1.1.7","eslint":"^8","eslint-plugin-unicorn":"^51.0.1","husky":"^9.0.10","next":"^14.0.2","prettier":"^3.2.5","tsup":"latest","typescript":"^5.4.5","undici-types":"^6.16.0","vitest":"latest"},"dependencies":{"crypto-js":">=4.2.0","jose":"^ 5.2.3"}}
+ {"version":"v2.5.5","name":"@upstash/qstash","description":"Official Typescript client for QStash","author":"Andreas Thomas <dev@chronark.com>","license":"MIT","homepage":"https://github.com/upstash/sdk-qstash-ts#readme","repository":{"type":"git","url":"git+https://github.com/upstash/sdk-qstash-ts.git"},"bugs":{"url":"https://github.com/upstash/sdk-qstash-ts/issues"},"main":"./index.js","module":"./index.mjs","types":"./index.d.ts","files":["./**"],"exports":{".":{"import":"./index.mjs","require":"./index.js"},"./nextjs":{"import":"./nextjs.js","require":"./nextjs.js"},"./dist/nextjs":{"import":"./nextjs.js","require":"./nextjs.js"}},"typesVersions":{"*":{"nextjs":["./nextjs.d.ts"]}},"keywords":["qstash","queue","events","serverless","upstash"],"scripts":{"build":"tsup && cp README.md ./dist/ && cp package.json ./dist/ && cp LICENSE ./dist/","test":"bun test","fmt":"prettier --write .","lint":"tsc && eslint \"src/**/*.{js,ts,tsx}\" --quiet --fix"},"devDependencies":{"@commitlint/cli":"^19.2.2","@commitlint/config-conventional":"^19.2.2","@types/bun":"^1.1.1","@types/crypto-js":"^4.2.0","@typescript-eslint/eslint-plugin":"^7.0.1","@typescript-eslint/parser":"^7.0.1","ai":"^3.1.28","bun-types":"^1.1.7","eslint":"^8","eslint-plugin-unicorn":"^51.0.1","husky":"^9.0.10","next":"^14.0.2","prettier":"^3.2.5","tsup":"latest","typescript":"^5.4.5","undici-types":"^6.16.0","vitest":"latest"},"dependencies":{"crypto-js":">=4.2.0","jose":"^ 5.2.3"}}