@upstash/qstash 2.6.3 → 2.6.4-workflow-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.md +4 -49
  2. package/dist/LICENSE +21 -0
  3. package/dist/README.md +160 -0
  4. package/{chunk-PTZPACVC.mjs → dist/chunk-AP5AZGAQ.mjs} +492 -472
  5. package/dist/chunk-EXC7PGHF.mjs +1540 -0
  6. package/dist/chunk-F6QRAN74.js +30 -0
  7. package/dist/chunk-OAS6SVLB.js +1540 -0
  8. package/dist/chunk-TAUBWUPA.mjs +30 -0
  9. package/{chunk-DZD3BOMO.js → dist/chunk-Z3TALRVS.js} +507 -487
  10. package/dist/index.d.mts +47 -0
  11. package/dist/index.d.ts +47 -0
  12. package/dist/index.js +38 -0
  13. package/{index.mjs → dist/index.mjs} +13 -11
  14. package/{nextjs.d.mts → dist/nextjs.d.mts} +4 -1
  15. package/{nextjs.d.ts → dist/nextjs.d.ts} +4 -1
  16. package/{nextjs.js → dist/nextjs.js} +21 -5
  17. package/{nextjs.mjs → dist/nextjs.mjs} +17 -1
  18. package/dist/nuxt.d.mts +22 -0
  19. package/dist/nuxt.d.ts +22 -0
  20. package/{nuxt.js → dist/nuxt.js} +59 -4
  21. package/dist/nuxt.mjs +103 -0
  22. package/dist/package.json +105 -0
  23. package/dist/solidjs.d.mts +13 -0
  24. package/dist/solidjs.d.ts +13 -0
  25. package/{solidjs.js → dist/solidjs.js} +25 -4
  26. package/{solidjs.mjs → dist/solidjs.mjs} +23 -2
  27. package/{svelte.d.mts → dist/svelte.d.mts} +4 -1
  28. package/{svelte.d.ts → dist/svelte.d.ts} +4 -1
  29. package/{svelte.js → dist/svelte.js} +27 -4
  30. package/{svelte.mjs → dist/svelte.mjs} +25 -2
  31. package/{index.d.mts → dist/types-BBuddmpw.d.mts} +416 -50
  32. package/{index.d.ts → dist/types-BBuddmpw.d.ts} +416 -50
  33. package/dist/workflow.d.mts +15 -0
  34. package/dist/workflow.d.ts +15 -0
  35. package/dist/workflow.js +15 -0
  36. package/dist/workflow.mjs +15 -0
  37. package/package.json +105 -1
  38. package/chunk-CP4IU45K.mjs +0 -59
  39. package/chunk-UUR7N6E6.js +0 -59
  40. package/index.js +0 -36
  41. package/nuxt.d.mts +0 -11
  42. package/nuxt.d.ts +0 -11
  43. package/nuxt.mjs +0 -48
  44. package/solidjs.d.mts +0 -10
  45. package/solidjs.d.ts +0 -10
package/README.md CHANGED
@@ -55,11 +55,11 @@ import { Client } from "@upstash/qstash";
 */
import "isomorphic-fetch";

- const client = new Client({
+ const c = new Client({
  token: "<QSTASH_TOKEN>",
});

- const res = await client.publishJSON({
+ const res = await c.publishJSON({
  url: "https://my-api...",
  // or urlGroup: "the name or id of a url group"
  body: {
@@ -107,7 +107,7 @@ No need for complicated setup your LLM request. We'll call LLM and schedule it f
```ts
import { Client, openai } from "@upstash/qstash";

- const client = new Client({
+ const c = new Client({
  token: "<QSTASH_TOKEN>",
});

@@ -131,11 +131,7 @@ const result = await client.publishJSON({
You can easily start streaming Upstash or OpenAI responses from your favorite framework(Next.js) or library

```ts
- import { Client, upstash } from "@upstash/qstash";
-
- const client = new Client({
-   token: "<QSTASH_TOKEN>",
- });
+ import { upstash } from "@upstash/qstash";

const response = await client.chat().create({
  provider: upstash(), // Optionally, provider: "custom({token: "XXX", baseUrl: "https://api.openai.com"})". This will allow you to call every OpenAI compatible API out there.
@@ -155,43 +151,6 @@ const response = await client.chat().create({
});
```

- ### Add Observability via Helicone
-
- Helicone is a powerful observability platform that provides valuable insights into your LLM usage. Integrating Helicone with QStash is straightforward.
-
- To enable Helicone observability in QStash, you simply need to pass your Helicone API key when initializing your model. Here's how to do it for both custom models and OpenAI:
-
- #### For Custom Models (e.g., Meta-Llama)
-
- ```ts
- import { Client, custom } from "@upstash/qstash";
-
- const client = new Client({
-   token: "<QSTASH_TOKEN>",
- });
-
- await client.publishJSON({
-   api: {
-     name: "llm",
-     provider: custom({
-       token: "XXX",
-       baseUrl: "https://api.together.xyz",
-     }),
-     analytics: { name: "helicone", token: process.env.HELICONE_API_KEY! },
-   },
-   body: {
-     model: "meta-llama/Llama-3-8b-chat-hf",
-     messages: [
-       {
-         role: "user",
-         content: "hello",
-       },
-     ],
-   },
-   callback: "https://oz.requestcatcher.com/",
- });
- ```
-
## Docs

See [the documentation](https://docs.upstash.com/qstash) for details.
@@ -199,7 +158,3 @@ See [the documentation](https://docs.upstash.com/qstash) for details.
## Contributing

### [Install Deno](https://deno.land/#installation)
-
- ```
-
- ```
package/dist/LICENSE ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2022 Upstash, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/dist/README.md ADDED
@@ -0,0 +1,160 @@
+ # Upstash QStash SDK
+
+ ![npm (scoped)](https://img.shields.io/npm/v/@upstash/qstash)
+
+ > [!NOTE]
+ > **This project is in the GA stage.**
+ > Upstash Professional Support fully covers this project. It receives regular updates and bug fixes.
+ > The Upstash team is committed to maintaining and improving its functionality.
+
+ **QStash** is an HTTP-based messaging and scheduling solution for serverless and
+ edge runtimes.
+
+ It is 100% built on stateless HTTP requests and designed for:
+
+ - Serverless functions (AWS Lambda ...)
+ - Cloudflare Workers (see
+   [the example](https://github.com/upstash/sdk-qstash-ts/tree/main/examples/cloudflare-workers))
+ - Fastly Compute@Edge
+ - Next.js, including [edge](https://nextjs.org/docs/api-reference/edge-runtime)
+ - Deno
+ - Client-side web/mobile applications
+ - WebAssembly
+ - and other environments where HTTP is preferred over TCP.
+
+ ## How does QStash work?
+
+ QStash is the message broker between your serverless apps. You send an HTTP
+ request to QStash that includes a destination, a payload, and optional settings.
+ We durably store your message and deliver it to the destination API via
+ HTTP. If the destination is not ready to receive the message, we retry
+ later to guarantee at-least-once delivery.
+
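That flow is easiest to see as the single HTTP request the SDK wraps. Below is a minimal sketch, assuming the QStash v2 publish endpoint (`POST https://qstash.upstash.io/v2/publish/<destination>`) and a runtime with a global `fetch` (Node 18+); the destination URL is hypothetical and the `Upstash-Delay` header name should be treated as an assumption:

```ts
// Sketch of the raw publish request that the SDK's publishJSON wraps.
// Endpoint and header names are assumptions based on the QStash v2 REST API.
const destination = "https://my-api.example.com/endpoint"; // hypothetical receiver

const res = await fetch(`https://qstash.upstash.io/v2/publish/${destination}`, {
  method: "POST",
  headers: {
    Authorization: `Bearer ${process.env.QSTASH_TOKEN}`,
    "Content-Type": "application/json",
    "Upstash-Delay": "60s", // optional: deliver after 60 seconds (assumed header)
  },
  body: JSON.stringify({ hello: "world" }),
});

console.log(await res.json()); // e.g. { messageId: "msg_..." }
```
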
32
+ ## Quick Start
33
+
34
+ ### Install
35
+
36
+ #### npm
37
+
38
+ ```bash
39
+ npm install @upstash/qstash
40
+ ```
41
+
42
+ ### Get your authorization token
43
+
44
+ Go to [Upstash Console](https://console.upstash.com/qstash) and copy the QSTASH_TOKEN.
45
+
46
+ ## Basic Usage:
47
+
48
+ ### Publishing a message
49
+
50
+ ```ts
51
+ import { Client } from "@upstash/qstash";
52
+ /**
53
+ * Import a fetch polyfill only if you are using node prior to v18.
54
+ * This is not necessary for nextjs, deno or cloudflare workers.
55
+ */
56
+ import "isomorphic-fetch";
57
+
58
+ const c = new Client({
59
+ token: "<QSTASH_TOKEN>",
60
+ });
61
+
62
+ const res = await c.publishJSON({
63
+ url: "https://my-api...",
64
+ // or urlGroup: "the name or id of a url group"
65
+ body: {
66
+ hello: "world",
67
+ },
68
+ });
69
+ console.log(res);
70
+ // { messageId: "msg_xxxxxxxxxxxxxxxx" }
71
+ ```
72
+
73
+ ### Receiving a message
74
+
75
+ How to receive a message depends on your http server. The `Receiver.verify`
76
+ method should be called by you as the first step in your handler function.
77
+
78
+ ```ts
79
+ import { Receiver } from "@upstash/qstash";
80
+
81
+ const r = new Receiver({
82
+ currentSigningKey: "..",
83
+ nextSigningKey: "..",
84
+ });
85
+
86
+ const isValid = await r.verify({
87
+ /**
88
+ * The signature from the `Upstash-Signature` header.
89
+ *
90
+ * Please note that on some platforms (e.g. Vercel or Netlify) you might
91
+ * receive the header in lower case: `upstash-signature`
92
+ *
93
+ */
94
+ signature: "string";
95
+
96
+ /**
97
+ * The raw request body.
98
+ */
99
+ body: "string";
100
+ })
101
+ ```
102
+
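To make "the first step in your handler function" concrete, here is a hedged sketch of wiring `Receiver.verify` into a fetch-style endpoint (the `Request`/`Response` shapes used by Next.js route handlers, Cloudflare Workers, and Deno); the signing-key environment variable names are the conventional Upstash ones and are an assumption here:

```ts
import { Receiver } from "@upstash/qstash";

const receiver = new Receiver({
  currentSigningKey: process.env.QSTASH_CURRENT_SIGNING_KEY!, // assumed env names
  nextSigningKey: process.env.QSTASH_NEXT_SIGNING_KEY!,
});

// A fetch-style handler: verify first, then process the message.
export async function POST(request: Request): Promise<Response> {
  const body = await request.text(); // raw body, not parsed JSON
  // Headers.get is case-insensitive, so this also covers `Upstash-Signature`.
  const signature = request.headers.get("upstash-signature") ?? "";

  const isValid = await receiver.verify({ signature, body });
  if (!isValid) {
    return new Response("invalid signature", { status: 401 });
  }

  const message = JSON.parse(body);
  // ... handle the message ...
  return new Response("ok", { status: 200 });
}
```
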
103
+ ### Publishing a message to Open AI or any Open AI Compatible LLM
104
+
105
+ No need for complicated setup your LLM request. We'll call LLM and schedule it for your serverless needs.
106
+
107
+ ```ts
108
+ import { Client, openai } from "@upstash/qstash";
109
+
110
+ const c = new Client({
111
+ token: "<QSTASH_TOKEN>",
112
+ });
113
+
114
+ const result = await client.publishJSON({
115
+ api: { name: "llm", provider: openai({ token: process.env.OPENAI_API_KEY! }) },
116
+ body: {
117
+ model: "gpt-3.5-turbo",
118
+ messages: [
119
+ {
120
+ role: "user",
121
+ content: "Where is the capital of Turkey?",
122
+ },
123
+ ],
124
+ },
125
+ callback: "https://oz.requestcatcher.com/",
126
+ });
127
+ ```
128
+
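The `callback` URL above is where QStash delivers the LLM's answer once it is ready. As a hedged sketch of the receiving side: QStash callbacks POST a JSON envelope whose `body` field is the base64-encoded response of the destination call; the exact field names here are assumptions based on the QStash callback format:

```ts
// Hedged sketch of a callback endpoint. Field names (status, body,
// sourceMessageId) are assumptions; `body` is base64-encoded.
export async function POST(request: Request): Promise<Response> {
  const envelope = (await request.json()) as {
    status: number;
    body: string; // base64-encoded response payload
    sourceMessageId: string;
  };

  // Decode and parse the OpenAI-style chat completion returned by the LLM.
  const completion = JSON.parse(atob(envelope.body));
  console.log(completion.choices?.[0]?.message?.content);

  return new Response("ok", { status: 200 });
}
```
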
+ ### Chatting with your favorite LLM
+
+ You can easily start streaming Upstash or OpenAI responses from your favorite framework (e.g. Next.js) or library.
+
+ ```ts
+ import { upstash } from "@upstash/qstash";
+
+ // `c` is the Client instance created in the examples above.
+ const response = await c.chat().create({
+   provider: upstash(), // Optionally, provider: custom({ token: "XXX", baseUrl: "https://api.openai.com" }). This allows you to call every OpenAI-compatible API out there.
+   model: "meta-llama/Meta-Llama-3-8B-Instruct", // Optionally, model: "gpt-3.5-turbo",
+   messages: [
+     {
+       role: "system",
+       content: "from now on, foo is whale",
+     },
+     {
+       role: "user",
+       content: "what exactly is foo?",
+     },
+   ],
+   stream: true,
+   temperature: 0.5,
+ });
+ ```
+
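Since `stream: true` is set, the `response` above is consumed incrementally rather than read once. A minimal sketch, assuming the SDK exposes the stream as an async iterable of OpenAI-style chat completion chunks (that chunk shape is an assumption):

```ts
// Print the streamed answer piece by piece as chunks arrive.
for await (const chunk of response) {
  const delta = chunk.choices?.[0]?.delta?.content;
  if (delta) process.stdout.write(delta); // Node; use your framework's writer elsewhere
}
```
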
+ ## Docs
+
+ See [the documentation](https://docs.upstash.com/qstash) for details.
+
+ ## Contributing
+
+ ### [Install Deno](https://deno.land/#installation)