@modelrelay/sdk 1.3.3 → 1.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +258 -141
- package/dist/index.cjs +4934 -5654
- package/dist/index.d.cts +2953 -398
- package/dist/index.d.ts +2953 -398
- package/dist/index.js +4884 -5654
- package/package.json +18 -6
package/README.md
CHANGED
@@ -1,40 +1,215 @@
 # ModelRelay TypeScript SDK

-The ModelRelay TypeScript SDK is a **responses-first**, **streaming-first** client for building cross-provider LLM features without committing to any single vendor API.
-
-It's designed to feel great in TypeScript:
-- One fluent builder for **streaming/non-streaming**, **text/structured**, and **customer-attributed** requests
-- Structured outputs powered by Zod schemas with validation and retry
-- A practical tool-use toolkit for "LLM + tools" apps
-
 ```bash
-npm install @modelrelay/sdk
-# or
 bun add @modelrelay/sdk
 ```

-##
+## Token Providers (Automatic Bearer Auth)
+
+Use token providers when you want the SDK to automatically obtain/refresh **bearer tokens** for data-plane calls like `/responses` and `/runs`.
+
+### OIDC id_token → customer bearer token (exchange)
+
+```ts
+import { ModelRelay, OIDCExchangeTokenProvider, parseSecretKey } from "@modelrelay/sdk";
+
+const tokenProvider = new OIDCExchangeTokenProvider({
+  apiKey: parseSecretKey(process.env.MODELRELAY_API_KEY!),
+  idTokenProvider: async () => {
+    // Return an OIDC id_token from your auth system (web login, device flow, etc).
+    return process.env.OIDC_ID_TOKEN!;
+  },
+});
+
+const mr = new ModelRelay({ tokenProvider });
+```
+
+If you need an `id_token` in a CLI-like context, you can use the OAuth device flow helper:
+
+```ts
+import { runOAuthDeviceFlowForIDToken } from "@modelrelay/sdk";
+
+const idToken = await runOAuthDeviceFlowForIDToken({
+  deviceAuthorizationEndpoint: "https://issuer.example.com/oauth/device/code",
+  tokenEndpoint: "https://issuer.example.com/oauth/token",
+  clientId: "your-client-id",
+  scope: "openid email profile",
+  onUserCode: ({ verificationUri, userCode }) => {
+    console.log(`Open ${verificationUri} and enter code: ${userCode}`);
+  },
+});
+```
+
+### Secret key → customer bearer token (mint)
+
+```ts
+import { CustomerTokenProvider, ModelRelay } from "@modelrelay/sdk";
+
+const tokenProvider = new CustomerTokenProvider({
+  secretKey: process.env.MODELRELAY_API_KEY!,
+  request: { customerId: "customer_..." },
+});
+
+const mr = new ModelRelay({ tokenProvider });
+```
+
+## Streaming Responses

 ```ts
 import { ModelRelay } from "@modelrelay/sdk";

-const mr = ModelRelay.fromSecretKey(
+const mr = ModelRelay.fromSecretKey("mr_sk_...");
+
+const req = mr.responses
+  .new()
+  .model("claude-sonnet-4-20250514")
+  .user("Hello")
+  .build();
+
+const stream = await mr.responses.stream(req);
+
+for await (const event of stream) {
+  if (event.type === "message_delta" && event.textDelta) {
+    process.stdout.write(event.textDelta);
+  }
+}
+```
+
+## Customer-Scoped Convenience

-
-
-
-
-
-
-
+```ts
+import { ModelRelay } from "@modelrelay/sdk";
+
+const mr = ModelRelay.fromSecretKey("mr_sk_...");
+const customer = mr.forCustomer("customer_abc123");
+
+const text = await customer.responses.text(
+  "You are a helpful assistant.",
+  "Summarize Q4 results",
 );
+```
+
+You can also stream structured JSON for a specific customer:
+
+```ts
+import { z } from "zod";
+import { ModelRelay, outputFormatFromZod } from "@modelrelay/sdk";
+
+const mr = ModelRelay.fromSecretKey("mr_sk_...");
+const customer = mr.forCustomer("customer_abc123");
+
+const schema = z.object({
+  summary: z.string(),
+  highlights: z.array(z.string()),
+});
+
+const req = customer.responses
+  .new()
+  .outputFormat(outputFormatFromZod(schema))
+  .system("You are a helpful assistant.")
+  .user("Summarize Q4 results")
+  .build();
+
+const stream = await customer.responses.streamJSON<z.infer<typeof schema>>(req);
+for await (const event of stream) {
+  if (event.type === "completion") {
+    console.log(event.payload);
+  }
+}
+```
+
+You can also pass a single object to `textForCustomer`:

-
+```ts
+const text = await mr.responses.textForCustomer({
+  customerId: "customer_abc123",
+  system: "You are a helpful assistant.",
+  user: "Summarize Q4 results",
+});
+```
+
+## Workflows
+
+High-level helpers for common workflow patterns:
+
+### Chain (Sequential)
+
+Sequential LLM calls where each step's output feeds the next step's input:
+
+```ts
+import { chain, llmStep } from "@modelrelay/sdk";
+
+const summarizeReq = mr.responses
+  .new()
+  .model("claude-sonnet-4-20250514")
+  .system("Summarize the input concisely.")
+  .user("The quick brown fox...")
+  .build();
+
+const translateReq = mr.responses
+  .new()
+  .model("claude-sonnet-4-20250514")
+  .system("Translate the input to French.")
+  .user("") // Bound from previous step
+  .build();
+
+const spec = chain("summarize-translate")
+  .step(llmStep("summarize", summarizeReq))
+  .step(llmStep("translate", translateReq).withStream())
+  .outputLast("result")
+  .build();
+```
+
+### Parallel (Fan-out with Aggregation)
+
+Concurrent LLM calls with optional aggregation:
+
+```ts
+import { parallel, llmStep } from "@modelrelay/sdk";
+
+const gpt4Req = mr.responses.new().model("gpt-4.1").user("Analyze this...").build();
+const claudeReq = mr.responses.new().model("claude-sonnet-4-20250514").user("Analyze this...").build();
+const synthesizeReq = mr.responses
+  .new()
+  .model("claude-sonnet-4-20250514")
+  .system("Synthesize the analyses into a unified view.")
+  .user("") // Bound from join output
+  .build();
+
+const spec = parallel("multi-model-compare")
+  .step(llmStep("gpt4", gpt4Req))
+  .step(llmStep("claude", claudeReq))
+  .aggregate("synthesize", synthesizeReq)
+  .output("result", "synthesize")
+  .build();
+```
+
+### MapReduce (Parallel Map with Reduce)
+
+Process items in parallel, then combine results:
+
+```ts
+import { mapReduce } from "@modelrelay/sdk";
+
+const combineReq = mr.responses
+  .new()
+  .model("claude-sonnet-4-20250514")
+  .system("Combine summaries into a cohesive overview.")
+  .user("") // Bound from join output
+  .build();
+
+const spec = mapReduce("summarize-docs")
+  .item("doc1", doc1Req)
+  .item("doc2", doc2Req)
+  .item("doc3", doc3Req)
+  .reduce("combine", combineReq)
+  .output("result", "combine")
+  .build();
 ```

 ## Chat-Like Text Helpers

-For the most common path (**system + user → assistant text**)
+For the most common path (**system + user → assistant text**):

 ```ts
 const text = await mr.responses.text(
@@ -48,16 +223,14 @@ console.log(text);
 For customer-attributed requests where the backend selects the model:

 ```ts
-const
-
-"
-"
+const text = await mr.responses.textForCustomer(
+  "customer-123",
+  "Answer concisely.",
+  "Say hi.",
 );
 ```

-
-
-Stream text deltas for real-time output:
+To stream only message text deltas:

 ```ts
 const deltas = await mr.responses.streamTextDeltas(
@@ -70,31 +243,14 @@ for await (const delta of deltas) {
 }
 ```

-For full control, stream typed events:
-
-```ts
-const req = mr.responses
-  .new()
-  .model("claude-sonnet-4-20250514")
-  .user("Hello")
-  .build();
-
-const stream = await mr.responses.stream(req);
-
-for await (const event of stream) {
-  if (event.type === "message_delta" && event.textDelta) {
-    process.stdout.write(event.textDelta);
-  }
-}
-```
-
 ## Structured Outputs with Zod

-Get typed, validated responses from the model:
-
 ```ts
+import { ModelRelay, parseSecretKey } from "@modelrelay/sdk";
 import { z } from "zod";

+const mr = new ModelRelay({ key: parseSecretKey("mr_sk_...") });
+
 const Person = z.object({
   name: z.string(),
   age: z.number(),
@@ -109,11 +265,16 @@ const result = await mr.responses.structured(
 console.log(result.value); // { name: "John Doe", age: 30 }
 ```

-
+## Streaming Structured Outputs

 Build progressive UIs that render fields as they complete:

 ```ts
+import { ModelRelay, parseSecretKey } from "@modelrelay/sdk";
+import { z } from "zod";
+
+const mr = new ModelRelay({ key: parseSecretKey("mr_sk_...") });
+
 const Article = z.object({
   title: z.string(),
   summary: z.string(),
@@ -126,12 +287,15 @@ const stream = await mr.responses.streamStructured(
 );

 for await (const event of stream) {
+  // Render fields as soon as they're complete
   if (event.completeFields.has("title")) {
     renderTitle(event.payload.title); // Safe to display
   }
   if (event.completeFields.has("summary")) {
     renderSummary(event.payload.summary);
   }
+
+  // Show streaming preview of incomplete fields
   if (!event.completeFields.has("body")) {
     renderBodyPreview(event.payload.body + "▋");
   }
@@ -140,7 +304,7 @@ for await (const event of stream) {

 ## Customer-Attributed Requests

-For metered billing, use `customerId()
+For metered billing, use `customerId()` — the customer's subscription tier determines the model and `model` can be omitted:

 ```ts
 const req = mr.responses
@@ -152,122 +316,64 @@ const req = mr.responses
 const stream = await mr.responses.stream(req);
 ```

-Or use the convenience method:
-
-```ts
-const text = await mr.responses.textForCustomer(
-  "customer-123",
-  "Answer concisely.",
-  "Say hi.",
-);
-```
-
 ## Customer Management (Backend)

 ```ts
 // Create/update customer
 const customer = await mr.customers.upsert({
-  tier_id: "tier-uuid",
   external_id: "your-user-id",
   email: "user@example.com",
 });

 // Create checkout session for subscription billing
-const session = await mr.customers.
+const session = await mr.customers.subscribe(customer.customer.id, {
+  tier_id: "tier-uuid",
   success_url: "https://myapp.com/success",
   cancel_url: "https://myapp.com/cancel",
 });

 // Check subscription status
-const status = await mr.customers.getSubscription(customer.id);
+const status = await mr.customers.getSubscription(customer.customer.id);
 ```

-##
+## Error Handling

-
+Errors are typed so callers can branch cleanly:

 ```ts
-import {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-{
-
-
-
-
-
-
-{
-  bindings: [
-    { from: parseNodeId("join"), to: "/input/1/content/0/text", encoding: "json_string" } satisfies LLMResponsesBindingV0,
-  ],
-},
-)
-.edge(parseNodeId("agent_a"), parseNodeId("join"))
-.edge(parseNodeId("agent_b"), parseNodeId("join"))
-.edge(parseNodeId("join"), parseNodeId("aggregate"))
-.output(parseOutputName("result"), parseNodeId("aggregate"))
-.build();
-
-const { run_id } = await mr.runs.create(spec);
-
-for await (const ev of await mr.runs.events(run_id)) {
-  if (ev.type === "run_completed") {
-    const status = await mr.runs.get(run_id);
-    console.log("outputs:", status.outputs);
+import {
+  ModelRelay,
+  APIError,
+  TransportError,
+  StreamTimeoutError,
+  ConfigError,
+} from "@modelrelay/sdk";
+
+try {
+  const response = await mr.responses.text(
+    "claude-sonnet-4-20250514",
+    "You are helpful.",
+    "Hello!"
+  );
+} catch (error) {
+  if (error instanceof APIError) {
+    console.log("Status:", error.status);
+    console.log("Code:", error.code);
+    console.log("Message:", error.message);
+
+    if (error.isRateLimit()) {
+      // Back off and retry
+    } else if (error.isUnauthorized()) {
+      // Re-authenticate
+    }
+  } else if (error instanceof TransportError) {
+    console.log("Network error:", error.message);
+  } else if (error instanceof StreamTimeoutError) {
+    console.log("Stream timeout:", error.streamKind); // "ttft" | "idle" | "total"
   }
 }
 ```

-## Token Providers (Advanced)
-
-For automatic bearer token management in data-plane calls:
-
-### Secret key → customer bearer token
-
-```ts
-import { CustomerTokenProvider, ModelRelay } from "@modelrelay/sdk";
-
-const tokenProvider = new CustomerTokenProvider({
-  secretKey: process.env.MODELRELAY_API_KEY!,
-  request: { projectId: "proj_...", customerId: "cust_..." },
-});
-
-const mr = new ModelRelay({ tokenProvider });
-```
-
-### OIDC exchange
-
-```ts
-import { ModelRelay, OIDCExchangeTokenProvider, parseSecretKey } from "@modelrelay/sdk";
-
-const tokenProvider = new OIDCExchangeTokenProvider({
-  apiKey: parseSecretKey(process.env.MODELRELAY_API_KEY!),
-  idTokenProvider: async () => process.env.OIDC_ID_TOKEN!,
-});
-
-const mr = new ModelRelay({ tokenProvider });
-```
-
 ## Configuration

 ```ts
@@ -278,3 +384,14 @@ const mr = new ModelRelay({
   retry: { maxAttempts: 3 },
 });
 ```
+
+## Documentation
+
+For detailed guides and API reference, visit [docs.modelrelay.ai](https://docs.modelrelay.ai):
+
+- [First Request](https://docs.modelrelay.ai/getting-started/first-request) — Make your first API call
+- [Streaming](https://docs.modelrelay.ai/guides/streaming) — Real-time response streaming
+- [Structured Output](https://docs.modelrelay.ai/guides/structured-output) — Get typed JSON responses
+- [Tool Use](https://docs.modelrelay.ai/guides/tools) — Let models call functions
+- [Error Handling](https://docs.modelrelay.ai/guides/error-handling) — Handle errors gracefully
+- [Workflows](https://docs.modelrelay.ai/guides/workflows) — Multi-step AI pipelines