@modelrelay/sdk 5.1.0 → 8.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -4,6 +4,91 @@
4
4
  bun add @modelrelay/sdk
5
5
  ```
6
6
 
7
+ ## Convenience API
8
+
9
+ This is the simplest way to get started. Three methods cover the most common use cases:
10
+
11
+ ### Ask — Get a Quick Answer
12
+
13
+ ```typescript
14
+ import { ModelRelay } from "@modelrelay/sdk";
15
+
16
+ const mr = ModelRelay.fromSecretKey(process.env.MODELRELAY_API_KEY!);
17
+
18
+ const answer = await mr.ask("claude-sonnet-4-5", "What is 2 + 2?");
19
+ console.log(answer); // "4"
20
+ ```
21
+
22
+ ### Chat — Full Response with Metadata
23
+
24
+ ```typescript
25
+ const response = await mr.chat("claude-sonnet-4-5", "Explain quantum computing", {
26
+ system: "You are a physics professor",
27
+ });
28
+
29
+ console.log(response.output);
30
+ console.log("Tokens:", response.usage.totalTokens);
31
+ ```
32
+
33
+ ### Agent — Agentic Tool Loops
34
+
35
+ Run an agent that automatically executes tools until completion:
36
+
37
+ ```typescript
38
+ import fs from "node:fs/promises";
+ import { z } from "zod";
39
+
40
+ const tools = mr
41
+ .tools()
42
+ .add(
43
+ "read_file",
44
+ "Read a file from the filesystem",
45
+ z.object({ path: z.string().describe("File path to read") }),
46
+ async (args) => {
47
+ const content = await fs.readFile(args.path, "utf-8");
48
+ return content;
49
+ }
50
+ );
51
+
52
+ const result = await mr.agent("claude-sonnet-4-5", {
53
+ tools,
54
+ prompt: "Read config.json and summarize it",
55
+ system: "You are a helpful file assistant",
56
+ });
57
+
58
+ console.log(result.output);
59
+ console.log("Tool calls:", result.usage.toolCalls);
60
+ ```
61
+
62
+ ### User Interaction — `user.ask`
63
+
64
+ Use the built-in `user.ask` tool to request human input in a workflow run:
65
+
66
+ ```typescript
67
+ import { ModelRelay, ToolRegistry, ToolRunner, createUserAskTool } from "@modelrelay/sdk";
68
+
69
+ const mr = ModelRelay.fromSecretKey(process.env.MODELRELAY_API_KEY!);
70
+
71
+ const tools = [createUserAskTool()];
72
+ const registry = new ToolRegistry();
73
+
74
+ const runner = new ToolRunner({
75
+ registry,
76
+ runsClient: mr.runs,
77
+ onUserAsk: async (_pending, args) => {
78
+ const answer = await promptUser(args.question); // your UI/input here
79
+ return { answer, is_freeform: true };
80
+ },
81
+ });
82
+
83
+ const run = await mr.runs.create(spec);
84
+
85
+ for await (const event of mr.runs.events(run.run_id)) {
86
+ if (event.type === "node_waiting") {
87
+ await runner.handleNodeWaiting(run.run_id, event.node_id, event.waiting);
88
+ }
89
+ }
90
+ ```
91
+
7
92
  ## Token Providers (Automatic Bearer Auth)
8
93
 
9
94
  Use token providers when you want the SDK to automatically obtain/refresh **bearer tokens** for data-plane calls like `/responses` and `/runs`.
@@ -98,81 +183,73 @@ const text = await mr.responses.textForCustomer({
98
183
 
99
184
  ## Workflows
100
185
 
101
- High-level helpers for common workflow patterns:
102
-
103
- ### Chain (Sequential)
186
+ Build multi-step AI pipelines with the workflow helpers.
104
187
 
105
- Sequential LLM calls where each step's output feeds the next step's input:
188
+ ### Sequential Chain
106
189
 
107
190
  ```ts
108
- import { chain, llmStep } from "@modelrelay/sdk";
191
+ import { chain, llm } from "@modelrelay/sdk";
109
192
 
110
- const summarizeReq = mr.responses
111
- .new()
112
- .model("claude-sonnet-4-5")
113
- .system("Summarize the input concisely.")
114
- .user("The quick brown fox...")
193
+ const spec = chain([
194
+ llm("summarize", (n) => n.system("Summarize.").user("{{task}}")),
195
+ llm("translate", (n) => n.system("Translate to French.").user("{{summarize}}")),
196
+ ], { name: "summarize-translate", model: "claude-sonnet-4-5" })
197
+ .output("result", "translate")
115
198
  .build();
116
199
 
117
- const translateReq = mr.responses
118
- .new()
119
- .model("claude-sonnet-4-5")
120
- .system("Translate the input to French.")
121
- .user("") // Bound from previous step
122
- .build();
200
+ const { run_id } = await mr.runs.create(spec);
201
+ ```
202
+
203
+ ### Parallel with Aggregation
123
204
 
124
- const spec = chain("summarize-translate")
125
- .step(llmStep("summarize", summarizeReq))
126
- .step(llmStep("translate", translateReq).withStream())
127
- .outputLast("result")
205
+ ```ts
206
+ import { parallel, llm } from "@modelrelay/sdk";
207
+
208
+ const spec = parallel([
209
+ llm("agent_a", (n) => n.user("Write 3 ideas for {{task}}.")),
210
+ llm("agent_b", (n) => n.user("Write 3 objections for {{task}}.")),
211
+ ], { name: "multi-agent", model: "claude-sonnet-4-5" })
212
+ .llm("aggregate", (n) => n.system("Synthesize.").user("{{join}}"))
213
+ .edge("join", "aggregate")
214
+ .output("result", "aggregate")
128
215
  .build();
129
216
  ```
130
217
 
131
- ### Parallel (Fan-out with Aggregation)
218
+ ### Precompiled Workflows
132
219
 
133
- Concurrent LLM calls with optional aggregation:
220
+ For workflows that run repeatedly, compile once and reuse:
134
221
 
135
222
  ```ts
136
- import { parallel, llmStep } from "@modelrelay/sdk";
137
-
138
- const gpt4Req = mr.responses.new().model("gpt-4.1").user("Analyze this...").build();
139
- const claudeReq = mr.responses.new().model("claude-sonnet-4-5").user("Analyze this...").build();
140
- const synthesizeReq = mr.responses
141
- .new()
142
- .model("claude-sonnet-4-5")
143
- .system("Synthesize the analyses into a unified view.")
144
- .user("") // Bound from join output
145
- .build();
146
-
147
- const spec = parallel("multi-model-compare")
148
- .step(llmStep("gpt4", gpt4Req))
149
- .step(llmStep("claude", claudeReq))
150
- .aggregate("synthesize", synthesizeReq)
151
- .output("result", "synthesize")
152
- .build();
223
+ // Compile once
224
+ const { plan_hash } = await mr.workflows.compile(spec);
225
+
226
+ // Run multiple times with different inputs
227
+ for (const task of tasks) {
228
+ const run = await mr.runs.createFromPlan(plan_hash, {
229
+ input: { task },
230
+ });
231
+ }
153
232
  ```
154
233
 
155
- ### MapReduce (Parallel Map with Reduce)
234
+ ### Plugins (Workflows)
156
235
 
157
- Process items in parallel, then combine results:
236
+ Load GitHub-hosted plugins (markdown commands + agents), convert to workflows via `/responses`, then run them with `/runs`:
158
237
 
159
238
  ```ts
160
- import { mapReduce } from "@modelrelay/sdk";
239
+ import { ModelRelay, OrchestrationModes } from "@modelrelay/sdk";
240
+ import { createLocalFSTools } from "@modelrelay/sdk/node";
161
241
 
162
- const combineReq = mr.responses
163
- .new()
164
- .model("claude-sonnet-4-5")
165
- .system("Combine summaries into a cohesive overview.")
166
- .user("") // Bound from join output
167
- .build();
242
+ const mr = ModelRelay.fromSecretKey(process.env.MODELRELAY_API_KEY!);
243
+ const tools = createLocalFSTools({ root: process.cwd() });
168
244
 
169
- const spec = mapReduce("summarize-docs")
170
- .item("doc1", doc1Req)
171
- .item("doc2", doc2Req)
172
- .item("doc3", doc3Req)
173
- .reduce("combine", combineReq)
174
- .output("result", "combine")
175
- .build();
245
+ const plugin = await mr.plugins.load("github.com/your-org/your-plugin");
246
+ const result = await mr.plugins.run(plugin, "run", {
247
+ userTask: "Summarize the repo and suggest next steps.",
248
+ orchestrationMode: OrchestrationModes.Dynamic,
249
+ toolRegistry: tools,
250
+ });
251
+
252
+ console.log(result.outputs?.result);
176
253
  ```
177
254
 
178
255
  ## Chat-Like Text Helpers