@modelrelay/sdk 1.10.3 → 1.14.0

package/README.md CHANGED
@@ -128,89 +128,84 @@ const text = await mr.responses.textForCustomer({
  });
  ```

- ## Workflow Runs (workflow.v0)
+ ## Workflows
+
+ High-level helpers for common workflow patterns:
+
+ ### Chain (Sequential)
+
+ Sequential LLM calls where each step's output feeds the next step's input:

  ```ts
- import {
-   ModelRelay,
-   type LLMResponsesBindingV0,
-   parseNodeId,
-   parseOutputName,
-   parseSecretKey,
-   workflowV0,
- } from "@modelrelay/sdk";
+ import { chain, llmStep } from "@modelrelay/sdk";

- const mr = new ModelRelay({ key: parseSecretKey("mr_sk_...") });
+ const summarizeReq = mr.responses
+   .new()
+   .model("claude-sonnet-4-20250514")
+   .system("Summarize the input concisely.")
+   .user("The quick brown fox...")
+   .build();

- const spec = workflowV0()
-   .name("multi_agent_v0_example")
-   .execution({ max_parallelism: 3, node_timeout_ms: 20_000, run_timeout_ms: 30_000 })
-   .llmResponses(parseNodeId("agent_a"), {
-     model: "claude-sonnet-4-20250514",
-     input: [
-       { type: "message", role: "system", content: [{ type: "text", text: "You are Agent A." }] },
-       { type: "message", role: "user", content: [{ type: "text", text: "Write 3 ideas for a landing page." }] },
-     ],
-   })
-   .llmResponses(parseNodeId("agent_b"), {
-     model: "claude-sonnet-4-20250514",
-     input: [
-       { type: "message", role: "system", content: [{ type: "text", text: "You are Agent B." }] },
-       { type: "message", role: "user", content: [{ type: "text", text: "Write 3 objections a user might have." }] },
-     ],
-   })
-   .llmResponses(parseNodeId("agent_c"), {
-     model: "claude-sonnet-4-20250514",
-     input: [
-       { type: "message", role: "system", content: [{ type: "text", text: "You are Agent C." }] },
-       { type: "message", role: "user", content: [{ type: "text", text: "Write 3 alternative headlines." }] },
-     ],
-   })
-   .joinAll(parseNodeId("join"))
-   .llmResponses(
-     parseNodeId("aggregate"),
-     {
-       model: "claude-sonnet-4-20250514",
-       input: [
-         {
-           type: "message",
-           role: "system",
-           content: [{ type: "text", text: "Synthesize the best answer from the following agent outputs (JSON)." }],
-         },
-         { type: "message", role: "user", content: [{ type: "text", text: "" }] }, // overwritten by bindings
-       ],
-     },
-     {
-       // Bind the join output into the aggregator prompt (fan-in).
-       bindings: [
-         {
-           from: parseNodeId("join"),
-           to: "/input/1/content/0/text",
-           encoding: "json_string",
-         } satisfies LLMResponsesBindingV0,
-       ],
-     },
-   )
-   .edge(parseNodeId("agent_a"), parseNodeId("join"))
-   .edge(parseNodeId("agent_b"), parseNodeId("join"))
-   .edge(parseNodeId("agent_c"), parseNodeId("join"))
-   .edge(parseNodeId("join"), parseNodeId("aggregate"))
-   .output(parseOutputName("result"), parseNodeId("aggregate"))
+ const translateReq = mr.responses
+   .new()
+   .model("claude-sonnet-4-20250514")
+   .system("Translate the input to French.")
+   .user("") // Bound from previous step
    .build();

- const { run_id } = await mr.runs.create(spec);
+ const spec = chain("summarize-translate")
+   .step(llmStep("summarize", summarizeReq))
+   .step(llmStep("translate", translateReq).withStream())
+   .outputLast("result")
+   .build();
+ ```

- const events = await mr.runs.events(run_id);
- for await (const ev of events) {
-   if (ev.type === "run_completed") {
-     const status = await mr.runs.get(run_id);
-     console.log("outputs:", status.outputs);
-     console.log("cost_summary:", status.cost_summary);
-   }
- }
+ ### Parallel (Fan-out with Aggregation)
+
+ Concurrent LLM calls with optional aggregation:
+
+ ```ts
+ import { parallel, llmStep } from "@modelrelay/sdk";
+
+ const gpt4Req = mr.responses.new().model("gpt-4.1").user("Analyze this...").build();
+ const claudeReq = mr.responses.new().model("claude-sonnet-4-20250514").user("Analyze this...").build();
+ const synthesizeReq = mr.responses
+   .new()
+   .model("claude-sonnet-4-20250514")
+   .system("Synthesize the analyses into a unified view.")
+   .user("") // Bound from join output
+   .build();
+
+ const spec = parallel("multi-model-compare")
+   .step(llmStep("gpt4", gpt4Req))
+   .step(llmStep("claude", claudeReq))
+   .aggregate("synthesize", synthesizeReq)
+   .output("result", "synthesize")
+   .build();
  ```

- See the full example in `sdk/ts/examples/workflows_multi_agent.ts`.
+ ### MapReduce (Parallel Map with Reduce)
+
+ Process items in parallel, then combine results:
+
+ ```ts
+ import { mapReduce } from "@modelrelay/sdk";
+
+ const combineReq = mr.responses
+   .new()
+   .model("claude-sonnet-4-20250514")
+   .system("Combine summaries into a cohesive overview.")
+   .user("") // Bound from join output
+   .build();
+
+ const spec = mapReduce("summarize-docs")
+   .item("doc1", doc1Req)
+   .item("doc2", doc2Req)
+   .item("doc3", doc3Req)
+   .reduce("combine", combineReq)
+   .output("result", "combine")
+   .build();
+ ```

  ## Chat-Like Text Helpers
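Reviewer's note on this hunk: the removed 1.10.3 example also showed how a built spec is executed and observed (`mr.runs.create`, `mr.runs.events`, `mr.runs.get`), which the new Workflows section above no longer covers. Below is a minimal sketch of running a chain-built spec, assuming the `mr` client setup and the runs API from the removed example carry over unchanged to 1.14.0; the chain name `"summarize-only"` is illustrative, not from the package.

```ts
// Sketch only: assumes the runs API shown in the removed 1.10.3 README
// (mr.runs.create / mr.runs.events / mr.runs.get) is unchanged in 1.14.0.
import { ModelRelay, chain, llmStep, parseSecretKey } from "@modelrelay/sdk";

const mr = new ModelRelay({ key: parseSecretKey("mr_sk_...") });

// Build a one-step chain spec using the new 1.14.0 helpers.
const summarizeReq = mr.responses
  .new()
  .model("claude-sonnet-4-20250514")
  .system("Summarize the input concisely.")
  .user("The quick brown fox...")
  .build();

const spec = chain("summarize-only")
  .step(llmStep("summarize", summarizeReq))
  .outputLast("result")
  .build();

// Execute the spec and wait for completion, mirroring the removed example.
const { run_id } = await mr.runs.create(spec);
const events = await mr.runs.events(run_id);
for await (const ev of events) {
  if (ev.type === "run_completed") {
    const status = await mr.runs.get(run_id);
    console.log("outputs:", status.outputs);
    console.log("cost_summary:", status.cost_summary);
  }
}
```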