@blockrun/clawrouter 0.12.33 → 0.12.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -119,9 +119,17 @@ Edit existing images with `/img2img` — pass a local file and describe what to
119
119
  | `--model <model>` | No | Model to use (default: `gpt-image-1`) |
120
120
  | `--size <WxH>` | No | Output size (default: `1024x1024`) |
121
121
 
122
- Supported model: `gpt-image-1` (OpenAI GPT Image 1, $0.02/image). ClawRouter reads the local file, converts it to base64, and sends it to BlockRun's `/v1/images/image2image` endpoint with automatic x402 payment.
122
+ Supported model: `gpt-image-1` (OpenAI GPT Image 1, $0.02/image).
123
123
 
124
- **API endpoint:** `POST http://localhost:8402/v1/images/image2image` is also available for programmatic use; see [Image Generation docs](docs/image-generation.md#post-v1imagesimage2image) for API reference and code examples.
124
+ **API endpoint:** `POST http://localhost:8402/v1/images/image2image` — accepts local file paths, URLs, or base64 data URIs:
125
+
126
+ ```bash
127
+ curl -X POST http://localhost:8402/v1/images/image2image \
128
+ -H "Content-Type: application/json" \
129
+ -d '{"prompt":"add sunglasses","image":"~/photo.png"}'
130
+ ```
131
+
132
+ See [Image Generation & Editing docs](docs/image-generation.md#post-v1imagesimage2image) for full API reference and code examples.
125
133
 
126
134
  ---
127
135
 
package/dist/cli.js CHANGED
@@ -5949,7 +5949,34 @@ async function startProxy(options) {
5949
5949
  for await (const chunk of req) {
5950
5950
  chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
5951
5951
  }
5952
- const reqBody = Buffer.concat(chunks);
5952
+ const rawBody = Buffer.concat(chunks);
5953
+ let reqBody;
5954
+ try {
5955
+ const parsed = JSON.parse(rawBody.toString());
5956
+ for (const field of ["image", "mask"]) {
5957
+ const val = parsed[field];
5958
+ if (typeof val !== "string" || !val) continue;
5959
+ if (val.startsWith("data:")) {
5960
+ } else if (val.startsWith("https://") || val.startsWith("http://")) {
5961
+ const imgResp = await fetch(val);
5962
+ if (!imgResp.ok) throw new Error(`Failed to download ${field} from ${val}: HTTP ${imgResp.status}`);
5963
+ const contentType = imgResp.headers.get("content-type") ?? "image/png";
5964
+ const buf = Buffer.from(await imgResp.arrayBuffer());
5965
+ parsed[field] = `data:${contentType};base64,${buf.toString("base64")}`;
5966
+ console.log(`[ClawRouter] img2img: downloaded ${field} URL \u2192 data URI (${buf.length} bytes)`);
5967
+ } else {
5968
+ parsed[field] = readImageFileAsDataUri(val);
5969
+ console.log(`[ClawRouter] img2img: read ${field} file \u2192 data URI`);
5970
+ }
5971
+ }
5972
+ if (!parsed.model) parsed.model = "openai/gpt-image-1";
5973
+ reqBody = JSON.stringify(parsed);
5974
+ } catch (parseErr) {
5975
+ const msg = parseErr instanceof Error ? parseErr.message : String(parseErr);
5976
+ res.writeHead(400, { "Content-Type": "application/json" });
5977
+ res.end(JSON.stringify({ error: "Invalid request", details: msg }));
5978
+ return;
5979
+ }
5953
5980
  try {
5954
5981
  const upstream = await payFetch(`${apiBase}/v1/images/image2image`, {
5955
5982
  method: "POST",