@upstash/qstash 2.6.2 → 2.6.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -55,11 +55,11 @@ import { Client } from "@upstash/qstash";
55
55
  */
56
56
  import "isomorphic-fetch";
57
57
 
58
- const c = new Client({
58
+ const client = new Client({
59
59
  token: "<QSTASH_TOKEN>",
60
60
  });
61
61
 
62
- const res = await c.publishJSON({
62
+ const res = await client.publishJSON({
63
63
  url: "https://my-api...",
64
64
  // or urlGroup: "the name or id of a url group"
65
65
  body: {
@@ -107,7 +107,7 @@ No need for a complicated setup for your LLM request. We'll call the LLM and schedule it f
107
107
  ```ts
108
108
  import { Client, openai } from "@upstash/qstash";
109
109
 
110
- const c = new Client({
110
+ const client = new Client({
111
111
  token: "<QSTASH_TOKEN>",
112
112
  });
113
113
 
@@ -131,7 +131,11 @@ const result = await client.publishJSON({
131
131
  You can easily start streaming Upstash or OpenAI responses from your favorite framework (Next.js) or library.
132
132
 
133
133
  ```ts
134
- import { upstash } from "@upstash/qstash";
134
+ import { Client, upstash } from "@upstash/qstash";
135
+
136
+ const client = new Client({
137
+ token: "<QSTASH_TOKEN>",
138
+ });
135
139
 
136
140
  const response = await client.chat().create({
137
141
  provider: upstash(), // Optionally, provider: "custom({token: "XXX", baseUrl: "https://api.openai.com"})". This will allow you to call every OpenAI compatible API out there.
@@ -151,6 +155,43 @@ const response = await client.chat().create({
151
155
  });
152
156
  ```
153
157
 
158
+ ### Add Observability via Helicone
159
+
160
+ Helicone is a powerful observability platform that provides valuable insights into your LLM usage. Integrating Helicone with QStash is straightforward.
161
+
162
+ To enable Helicone observability in QStash, you simply need to pass your Helicone API key when initializing your model. Here's how to do it for both custom models and OpenAI:
163
+
164
+ #### For Custom Models (e.g., Meta-Llama)
165
+
166
+ ```ts
167
+ import { Client, custom } from "@upstash/qstash";
168
+
169
+ const client = new Client({
170
+ token: "<QSTASH_TOKEN>",
171
+ });
172
+
173
+ await client.publishJSON({
174
+ api: {
175
+ name: "llm",
176
+ provider: custom({
177
+ token: "XXX",
178
+ baseUrl: "https://api.together.xyz",
179
+ }),
180
+ analytics: { name: "helicone", token: process.env.HELICONE_API_KEY! },
181
+ },
182
+ body: {
183
+ model: "meta-llama/Llama-3-8b-chat-hf",
184
+ messages: [
185
+ {
186
+ role: "user",
187
+ content: "hello",
188
+ },
189
+ ],
190
+ },
191
+ callback: "https://oz.requestcatcher.com/",
192
+ });
193
+ ```
194
+
154
195
  ## Docs
155
196
 
156
197
  See [the documentation](https://docs.upstash.com/qstash) for details.
@@ -158,3 +199,7 @@ See [the documentation](https://docs.upstash.com/qstash) for details.
158
199
  ## Contributing
159
200
 
160
201
  ### [Install Deno](https://deno.land/#installation)
202
+
203
+ ```
204
+
205
+ ```