ruflo 3.6.12 → 3.6.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/package.json +4 -1
  2. package/src/ruvocal/.claude-flow/data/pending-insights.jsonl +25 -0
  3. package/src/ruvocal/.claude-flow/neural/stats.json +6 -0
  4. package/src/ruvocal/.dockerignore +5 -1
  5. package/src/ruvocal/.gcloudignore +18 -0
  6. package/src/ruvocal/README.md +107 -133
  7. package/src/ruvocal/cloudbuild.yaml +68 -0
  8. package/src/ruvocal/config/branding.env.example +19 -0
  9. package/src/ruvocal/mcp-bridge/index.js +15 -1
  10. package/src/ruvocal/src/lib/components/FoundationBackground.svelte +242 -0
  11. package/src/ruvocal/src/lib/components/NavMenu.svelte +18 -0
  12. package/src/ruvocal/src/lib/components/RufloHelpModal.svelte +411 -0
  13. package/src/ruvocal/src/lib/components/chat/ChatWindow.svelte +122 -4
  14. package/src/ruvocal/src/lib/components/wasm/GalleryPanel.svelte +357 -0
  15. package/src/ruvocal/src/lib/constants/mcpExamples.ts +56 -77
  16. package/src/ruvocal/src/lib/constants/routerExamples.ts +51 -127
  17. package/src/ruvocal/src/lib/constants/rvagentPresets.ts +206 -0
  18. package/src/ruvocal/src/lib/server/textGeneration/mcp/wasmTools.test.ts +633 -0
  19. package/src/ruvocal/src/lib/stores/mcpServers.ts +195 -6
  20. package/src/ruvocal/src/lib/stores/wasmMcp.ts +472 -0
  21. package/src/ruvocal/src/lib/types/Settings.ts +7 -0
  22. package/src/ruvocal/src/lib/types/Tool.ts +4 -1
  23. package/src/ruvocal/src/lib/wasm/idb.ts +438 -0
  24. package/src/ruvocal/src/lib/wasm/index.ts +1213 -0
  25. package/src/ruvocal/src/lib/wasm/tests/wasm-capabilities.test.ts +565 -0
  26. package/src/ruvocal/src/lib/wasm/wasm.worker.ts +332 -0
  27. package/src/ruvocal/src/lib/wasm/workerClient.ts +166 -0
  28. package/src/ruvocal/static/wasm/rvagent_wasm.js +1539 -0
  29. package/src/ruvocal/static/wasm/rvagent_wasm_bg.wasm +0 -0
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "ruflo",
-   "version": "3.6.12",
+   "version": "3.6.13",
    "description": "Ruflo - Enterprise AI agent orchestration platform. Deploy 60+ specialized agents in coordinated swarms with self-learning, fault-tolerant consensus, vector memory, and MCP integration",
    "main": "bin/ruflo.js",
    "type": "module",
@@ -41,6 +41,9 @@
    "dependencies": {
      "@claude-flow/cli": ">=3.0.0-alpha.1"
    },
+   "overrides": {
+     "@ruvector/rvf-wasm": "0.1.5"
+   },
    "engines": {
      "node": ">=20.0.0"
    },
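The new `overrides` entry pins the transitive `@ruvector/rvf-wasm` dependency to 0.1.5 across the whole install tree, regardless of what intermediate packages request; running `npm ls @ruvector/rvf-wasm` after a fresh install is a quick way to confirm the override took effect.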
package/src/ruvocal/.claude-flow/data/pending-insights.jsonl ADDED
@@ -0,0 +1,25 @@
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/cloudbuild.yaml","timestamp":1777643997390}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777647325711}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/mcp-bridge/index.js","timestamp":1777647484755}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777648109715}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/constants/mcpExamples.ts","timestamp":1777648302383}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777648333969}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777648385544}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/components/RufloHelpModal.svelte","timestamp":1777648491648}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/components/NavMenu.svelte","timestamp":1777648496824}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/components/NavMenu.svelte","timestamp":1777648507341}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/components/NavMenu.svelte","timestamp":1777648513732}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/components/NavMenu.svelte","timestamp":1777648515534}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/constants/routerExamples.ts","timestamp":1777648661926}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/mcp-bridge/index.js","timestamp":1777648734846}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777648943539}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777649358188}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777649495186}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/README.md","timestamp":1777649945240}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/src/ruvocal/src/lib/wasm/wasm.worker.ts","timestamp":1777651016480}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/docs/adr/ADR-033-RUVOCAL-WASM-MCP-INTEGRATION.md","timestamp":1777656145721}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/ruflo/docs/adr/ADR-033-RUVOCAL-WASM-MCP-INTEGRATION.md","timestamp":1777656159040}
+ {"type":"edit","file":"/tmp/ruvocal-env.yaml","timestamp":1777670170817}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/v3/goal_ui/src/pages/NotFound.tsx","timestamp":1777698043248}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/v3/goal_ui/.optimization-plan.md","timestamp":1777698076855}
+ {"type":"edit","file":"/Users/cohen/Projects/ruflo/v3/goal_ui/.optimization-plan.md","timestamp":1777698095555}
package/src/ruvocal/.claude-flow/neural/stats.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "trajectoriesRecorded": 0,
+   "patternsLearned": 0,
+   "signalsProcessed": 0,
+   "lastAdaptation": null
+ }
package/src/ruvocal/.dockerignore CHANGED
@@ -10,4 +10,8 @@ node_modules/
  !.env
  .env.local
  db
- models/**
+ # NOTE: previously had `models/**` here to ignore a top-level placeholder
+ # directory. Even when anchored (`/models/**`), Docker's pattern matcher
+ # was eliding nested src/routes/api/v2/models/ and src/routes/models/
+ # from the build context, producing prod images missing those routes.
+ # The placeholder dir is small (single .txt) — letting it through is fine.
package/src/ruvocal/.gcloudignore ADDED
@@ -0,0 +1,18 @@
+ # gcloud builds submit upload filter.
+ # Without this file, gcloud falls back to .gitignore, which has
+ # `models/*` (line 16) — that excludes src/routes/api/v2/models/*
+ # and src/routes/models/* from the build context, producing
+ # production images missing those SvelteKit routes (root cause
+ # of /api/v2/models 404 → /favicon 500 → / 500 in Cloud Run).
+
+ .git/
+ .gitignore
+ .dockerignore
+ .gcloudignore
+ node_modules/
+ .svelte-kit/
+ build/
+ .env.local
+ .vscode/
+ .idea/
+ *.log
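When adjusting this filter later, `gcloud meta list-files-for-upload` run from this directory should print the exact file set `gcloud builds submit` would upload, which makes it easy to confirm the SvelteKit `models` routes stay in the build context.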
package/src/ruvocal/README.md CHANGED
@@ -1,190 +1,164 @@
- # Chat UI
+ # RuVocal — RuFlo Web UI

- ![Chat UI repository thumbnail](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/chat-ui-2026.png)
+ > RuFlo's multi-model AI chat with built-in Model Context Protocol (MCP) tool calling. Live at [**flo.ruv.io**](https://flo.ruv.io/).

- A chat interface for LLMs. It is a SvelteKit app and it powers the [HuggingChat app on hf.co/chat](https://huggingface.co/chat).
+ [![Try the Web UI flo.ruv.io](https://img.shields.io/badge/✨_Try_it-flo.ruv.io-6366f1?style=for-the-badge&logo=svelte&logoColor=white)](https://flo.ruv.io/)

- 0. [Quickstart](#quickstart)
- 1. [Database Options](#database-options)
- 2. [Launch](#launch)
- 3. [Optional Docker Image](#optional-docker-image)
- 4. [Extra parameters](#extra-parameters)
- 5. [Building](#building)
+ RuVocal is the SvelteKit web app that lets you chat with Qwen, Claude, Gemini, or OpenAI while [RuFlo](https://github.com/ruvnet/ruflo) invokes the same ~210 MCP tools the CLI uses — agent orchestration, persistent memory, swarm coordination, code review, GitHub ops — all directly from chat. No install, no API key needed to try the hosted demo.

- > [!NOTE]
- > Chat UI only supports OpenAI-compatible APIs via `OPENAI_BASE_URL` and the `/models` endpoint. Provider-specific integrations (legacy `MODELS` env var, GGUF discovery, embeddings, web-search helpers, etc.) are removed, but any service that speaks the OpenAI protocol (llama.cpp server, Ollama, OpenRouter, etc. will work by default).
+ It started as a fork of the [HuggingFace chat-ui](https://github.com/huggingface/chat-ui) v0.20.0 and has been extended with a WASM-MCP integration layer, parallel tool execution, an in-browser tool gallery, and a "RuFlo Capabilities" tour modal. See [ADR-033](../../docs/adr/ADR-033-RUVOCAL-WASM-MCP-INTEGRATION.md) for the architecture.

- > [!NOTE]
- > The old version is still available on the [legacy branch](https://github.com/huggingface/chat-ui/tree/legacy)
+ ## What RuVocal adds on top of upstream chat-ui

- ## Quickstart
+ | | |
+ |---|---|
+ | 🛠️ **~210 MCP tools, prefixed** | Five RuFlo server groups (Core, Intelligence, Agents, Memory, DevTools) plus an 18-tool in-browser WASM gallery |
+ | ⚡ **Parallel tool calls** | One model turn fires 4–6+ tools at once via `Promise.all`. The UI shows a *Step N — X tools completed* badge per turn |
+ | 📘 **RuFlo Capabilities modal** | Question-mark icon → multi-section tour: models, tools, architecture, shortcuts |
+ | 💾 **AgentDB-backed memory** | "Remember my favorite color is indigo" → recalled weeks later via HNSW vector search |
+ | 🧠 **6 curated frontier models** | Qwen 3.6 Max (default), Claude Sonnet 4.6, Claude Haiku 4.5, Gemini 2.5 Pro, Gemini 2.5 Flash, OpenAI — via OpenRouter |
+ | 🔌 **Bring-your-own MCP servers** | Add HTTP/SSE/stdio endpoints from the chat input; they join the parallel-execution flow |
+ | 🦾 **ruvLLM ready** | Native support for [ruvLLM](https://github.com/ruvnet/RuVector/tree/main/examples/ruvLLM) — RuFlo's self-improving local model layer |
+ | 🏠 **Self-hostable** | Multi-stage Dockerfile (`INCLUDE_DB=true` builds in MongoDB), `cloudbuild.yaml` for Google Cloud Run |

- Chat UI speaks to OpenAI-compatible APIs only. The fastest way to get running is with the Hugging Face Inference Providers router plus your personal Hugging Face access token.
+ ## Quick Start

- **Step 1 Create `.env.local`:**
+ ### Hosted (zero install)

- ```env
- OPENAI_BASE_URL=https://router.huggingface.co/v1
- OPENAI_API_KEY=hf_************************
- ```
-
- `OPENAI_API_KEY` can come from any OpenAI-compatible endpoint you plan to call. Pick the combo that matches your setup and drop the values into `.env.local`:
-
- | Provider | Example `OPENAI_BASE_URL` | Example key env |
- | --------------------------------------------- | ---------------------------------- | ----------------------------------------------------------------------- |
- | Hugging Face Inference Providers router | `https://router.huggingface.co/v1` | `OPENAI_API_KEY=hf_xxx` (or `HF_TOKEN` legacy alias) |
- | llama.cpp server (`llama.cpp --server --api`) | `http://127.0.0.1:8080/v1` | `OPENAI_API_KEY=sk-local-demo` (any string works; llama.cpp ignores it) |
- | Ollama (with OpenAI-compatible bridge) | `http://127.0.0.1:11434/v1` | `OPENAI_API_KEY=ollama` |
- | OpenRouter | `https://openrouter.ai/api/v1` | `OPENAI_API_KEY=sk-or-v1-...` |
- | Poe | `https://api.poe.com/v1` | `OPENAI_API_KEY=pk_...` |
-
- Check the root [`.env` template](./.env) for the full list of optional variables you can override.
+ [**flo.ruv.io**](https://flo.ruv.io/) — pick a model, type a question. That's it.

- **Step 2 – Install and launch the dev server:**
+ ### Local dev

  ```bash
- git clone https://github.com/huggingface/chat-ui
- cd chat-ui
+ git clone https://github.com/ruvnet/ruflo
+ cd ruflo/ruflo/src/ruvocal
+ cp .env .env.local # then edit .env.local — see below
  npm install
- npm run dev -- --open
+ npm run dev # http://localhost:5173
  ```

- You now have Chat UI running locally. Open the browser and start chatting.
-
- ## Database Options
-
- Chat history, users, settings, files, and stats all live in MongoDB. You can point Chat UI at any MongoDB 6/7 deployment.
-
- > [!TIP]
- > For quick local development, you can skip this section. When `MONGODB_URL` is not set, Chat UI falls back to an embedded MongoDB that persists to `./db`.
-
- ### MongoDB Atlas (managed)
+ Minimum `.env.local` to use OpenRouter (matches the hosted setup):

- 1. Create a free cluster at [mongodb.com](https://www.mongodb.com/pricing).
- 2. Add your IP (or `0.0.0.0/0` for development) to the network access list.
- 3. Create a database user and copy the connection string.
- 4. Paste that string into `MONGODB_URL` in `.env.local`. Keep the default `MONGODB_DB_NAME=chat-ui` or change it per environment.
-
- Atlas keeps MongoDB off your laptop, which is ideal for teams or cloud deployments.
-
- ### Local MongoDB (container)
-
- If you prefer to run MongoDB in a container:
+ ```env
+ OPENAI_BASE_URL=https://openrouter.ai/api/v1
+ OPENAI_API_KEY=sk-or-v1-...

- ```bash
- docker run -d -p 27017:27017 --name mongo-chatui mongo:latest
+ # Curated RuFlo model list (optional — defaults to /models from the base URL)
+ TASK_MODEL=qwen/qwen3.6-max-preview
+ PUBLIC_APP_NAME=RuFlo
+ PUBLIC_APP_DESCRIPTION="Intelligent workflow automation assistant powered by Claude/Gemini/Qwen and MCP tools."
  ```

- Then set `MONGODB_URL=mongodb://localhost:27017` in `.env.local`.
+ Any OpenAI-compatible endpoint works (vLLM, Ollama, LM Studio, llama.cpp, Together, Groq, self-hosted, …):

- ## Launch
+ | Provider | `OPENAI_BASE_URL` | Key |
+ | --- | --- | --- |
+ | OpenRouter | `https://openrouter.ai/api/v1` | `sk-or-v1-...` |
+ | Hugging Face router | `https://router.huggingface.co/v1` | `hf_xxx` |
+ | llama.cpp server | `http://127.0.0.1:8080/v1` | any string |
+ | Ollama (OAI bridge) | `http://127.0.0.1:11434/v1` | `ollama` |
+ | Poe | `https://api.poe.com/v1` | `pk_...` |

- After configuring your environment variables, start Chat UI with:
+ ### Docker (with embedded MongoDB)

  ```bash
- npm install
- npm run dev
+ docker build -t ruvocal --build-arg INCLUDE_DB=true .
+ docker run -p 3000:3000 \
+   -e OPENAI_BASE_URL=https://openrouter.ai/api/v1 \
+   -e OPENAI_API_KEY=sk-or-v1-... \
+   -v ruvocal-data:/data \
+   ruvocal
  ```

- The dev server listens on `http://localhost:5173` by default. Use `npm run build` / `npm run preview` for production builds.
+ ### Google Cloud Run (production-style)

- ## Optional Docker Image
+ `cloudbuild.yaml` does a multi-stage build with `INCLUDE_DB=true`, pushes to Artifact Registry, and deploys to Cloud Run. The hosted demo at `flo.ruv.io` runs from this exact pipeline. See [`cloudbuild.yaml`](./cloudbuild.yaml) and the deploy notes in [ADR-033](../../docs/adr/ADR-033-RUVOCAL-WASM-MCP-INTEGRATION.md).

- The `chat-ui-db` image bundles MongoDB inside the container:
+ ## Database

- ```bash
- docker run \
- -p 3000:3000 \
- -e OPENAI_BASE_URL=https://router.huggingface.co/v1 \
- -e OPENAI_API_KEY=hf_*** \
- -v chat-ui-data:/data \
- ghcr.io/huggingface/chat-ui-db:latest
- ```
+ Chat history, users, settings, files, and stats live in MongoDB. Three options:

- All environment variables accepted in `.env.local` can be provided as `-e` flags.
+ - **Embedded (zero-config)** — omit `MONGODB_URL`; the app uses `MongoMemoryServer` and persists to `./db`. Good for local dev and the `INCLUDE_DB=true` Docker path.
+ - **MongoDB Atlas (managed)** — free cluster at [mongodb.com](https://www.mongodb.com/pricing), allow-list your IP, set `MONGODB_URL` to the connection string.
+ - **Local container** — `docker run -d -p 27017:27017 --name mongo-ruvocal mongo:latest` then `MONGODB_URL=mongodb://localhost:27017`.

- ## Extra parameters
+ `MONGODB_DB_NAME` defaults to `chat-ui` (kept for upstream compatibility); change per environment.

- ### Theming
+ ## MCP Tools (the RuFlo difference)

- You can use a few environment variables to customize the look and feel of chat-ui. These are by default:
+ RuVocal calls tools exposed by Model Context Protocol servers and feeds results back to the model via OpenAI function calling. Configure trusted servers via env, let users add their own, and the router auto-selects tools-capable models when needed.

  ```env
- PUBLIC_APP_NAME=ChatUI
- PUBLIC_APP_ASSETS=chatui
- PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone."
- PUBLIC_APP_DATA_SHARING=
+ MCP_SERVERS=[
+   {"name":"RuFlo Core","url":"https://mcp-bridge-...run.app/mcp/core","transport":"sse"},
+   {"name":"RuFlo Intelligence","url":"https://mcp-bridge-...run.app/mcp/intelligence","transport":"sse"},
+   {"name":"RuFlo Agents","url":"https://mcp-bridge-...run.app/mcp/agents","transport":"sse"},
+   {"name":"RuFlo Memory","url":"https://mcp-bridge-...run.app/mcp/memory","transport":"sse"},
+   {"name":"RuFlo DevTools","url":"https://mcp-bridge-...run.app/mcp/devtools","transport":"sse"},
+   {"name":"π Shared Brain","url":"https://mcp.pi.ruv.io","transport":"streamable-http"}
+ ]
  ```

- - `PUBLIC_APP_NAME` The name used as a title throughout the app.
- - `PUBLIC_APP_ASSETS` Is used to find logos & favicons in `static/$PUBLIC_APP_ASSETS`, current options are `chatui` and `huggingchat`.
- - `PUBLIC_APP_DATA_SHARING` Can be set to 1 to add a toggle in the user settings that lets your users opt-in to data sharing with models creator.
-
- ### Models
-
- Models are discovered from `${OPENAI_BASE_URL}/models`, and you can optionally override their metadata via the `MODELS` env var (JSON5). Legacy provider‑specific integrations and GGUF discovery are removed. Authorization uses `OPENAI_API_KEY` (preferred). `HF_TOKEN` remains a legacy alias.
+ In the chat UI: **MCP (n)** pill in the chat input → *Add Server* to drop in any HTTP/SSE/stdio endpoint. Run a local MCP server on `localhost:3000` and it just works.

- ### LLM Router (Optional)
+ When a model calls a tool, the message shows a compact card with parameters, a progress bar while running, and the result. Multiple tools in the same turn render as a parallel-execution group.

- Chat UI can perform server-side smart routing using [katanemo/Arch-Router-1.5B](https://huggingface.co/katanemo/Arch-Router-1.5B) as the routing model without running a separate router service. The UI exposes a virtual model alias called "Omni" (configurable) that, when selected, chooses the best route/model for each message.
+ ## LLM Router (Omni)

- - Provide a routes policy JSON via `LLM_ROUTER_ROUTES_PATH`. No sample file ships with this branch, so you must point the variable to a JSON array you create yourself (for example, commit one in your project like `config/routes.chat.json`). Each route entry needs `name`, `description`, `primary_model`, and optional `fallback_models`.
- - Configure the Arch router selection endpoint with `LLM_ROUTER_ARCH_BASE_URL` (OpenAI-compatible `/chat/completions`) and `LLM_ROUTER_ARCH_MODEL` (e.g. `router/omni`). The Arch call reuses `OPENAI_API_KEY` for auth.
- - Map `other` to a concrete route via `LLM_ROUTER_OTHER_ROUTE` (default: `casual_conversation`). If Arch selection fails, calls fall back to `LLM_ROUTER_FALLBACK_MODEL`.
- - Selection timeout can be tuned via `LLM_ROUTER_ARCH_TIMEOUT_MS` (default 10000).
- - Omni alias configuration: `PUBLIC_LLM_ROUTER_ALIAS_ID` (default `omni`), `PUBLIC_LLM_ROUTER_DISPLAY_NAME` (default `Omni`), and optional `PUBLIC_LLM_ROUTER_LOGO_URL`.
+ RuVocal can do server-side smart routing using [katanemo/Arch-Router-1.5B](https://huggingface.co/katanemo/Arch-Router-1.5B) without a separate router service. Selecting "Omni" in the model picker:

- When you select Omni in the UI, Chat UI will:
+ 1. Calls Arch once (non-streaming) to pick the best route for recent turns
+ 2. Emits `RouterMetadata` so the UI shows route + selected model
+ 3. Streams from the selected model via `OPENAI_BASE_URL`; on errors, tries route fallbacks

- - Call the Arch endpoint once (non-streaming) to pick the best route for the last turns.
- - Emit RouterMetadata immediately (route and actual model used) so the UI can display it.
- - Stream from the selected model via your configured `OPENAI_BASE_URL`. On errors, it tries route fallbacks.
+ Shortcut paths bypass Arch:
+ - **Multimodal**: `LLM_ROUTER_ENABLE_MULTIMODAL=true` + image attached uses `LLM_ROUTER_MULTIMODAL_MODEL`
+ - **Tools/Agentic**: `LLM_ROUTER_ENABLE_TOOLS=true` + ≥1 MCP server enabled uses `LLM_ROUTER_TOOLS_MODEL`

- Tool and multimodal shortcuts:
+ Configure via `LLM_ROUTER_ROUTES_PATH` (JSON array of route entries with `name`, `description`, `primary_model`, optional `fallback_models`), `LLM_ROUTER_ARCH_BASE_URL`, `LLM_ROUTER_ARCH_MODEL`, `LLM_ROUTER_OTHER_ROUTE`, `LLM_ROUTER_FALLBACK_MODEL`, `LLM_ROUTER_ARCH_TIMEOUT_MS`.

- - Multimodal: If `LLM_ROUTER_ENABLE_MULTIMODAL=true` and the user sends an image, the router bypasses Arch and uses the model specified in `LLM_ROUTER_MULTIMODAL_MODEL`. Route name: `multimodal`.
- - Tools: If `LLM_ROUTER_ENABLE_TOOLS=true` and the user has at least one MCP server enabled, the router bypasses Arch and uses `LLM_ROUTER_TOOLS_MODEL`. If that model is missing or misconfigured, it falls back to Arch routing. Route name: `agentic`.
+ Display: `PUBLIC_LLM_ROUTER_ALIAS_ID` (default `omni`), `PUBLIC_LLM_ROUTER_DISPLAY_NAME` (default `Omni`), `PUBLIC_LLM_ROUTER_LOGO_URL`.

- ### MCP Tools (Optional)
-
- Chat UI can call tools exposed by Model Context Protocol (MCP) servers and feed results back to the model using OpenAI function calling. You can preconfigure trusted servers via env, let users add their own, and optionally have the Omni router auto‑select a tools‑capable model.
-
- Configure servers (base list for all users):
+ ## Theming

  ```env
- # JSON array of servers: name, url, optional headers
- MCP_SERVERS=[
- {"name": "Web Search (Exa)", "url": "https://mcp.exa.ai/mcp"},
- {"name": "Hugging Face MCP Login", "url": "https://hf.co/mcp?login"}
- ]
-
- # Forward the signed-in user's Hugging Face token to the official HF MCP login endpoint
- # when no Authorization header is set on that server entry.
- MCP_FORWARD_HF_USER_TOKEN=true
+ PUBLIC_APP_NAME=RuFlo
+ PUBLIC_APP_ASSETS=chatui
+ PUBLIC_APP_DESCRIPTION="Intelligent workflow automation assistant powered by Claude/Gemini/Qwen and MCP tools."
+ PUBLIC_APP_DATA_SHARING=
  ```

- Enable router tool path (Omni):
-
- - Set `LLM_ROUTER_ENABLE_TOOLS=true` and choose a tools‑capable target with `LLM_ROUTER_TOOLS_MODEL=<model id or name>`.
- - The target must support OpenAI tools/function calling. Chat UI surfaces a “tools” badge on models that advertise this; you can also force‑enable it per‑model in settings (see below).
+ `PUBLIC_APP_ASSETS` picks the logo/favicon directory under `static/$PUBLIC_APP_ASSETS`.

- Use tools in the UI:
+ ## Build

- - Open “MCP Servers” from the top‑right menu or from the `+` menu in the chat input to add servers, toggle them on, and run Health Check. The server card lists available tools.
- - When a model calls a tool, the message shows a compact “tool” block with parameters, a progress bar while running, and the result (or error). Results are also provided back to the model for follow‑up.
+ ```bash
+ npm run build # production bundle
+ npm run preview # preview the build locally
+ ```

- Per‑model overrides:
+ ## Architecture (one-pager)

- - In Settings Model, you can toggle “Tool calling (functions)” and “Multimodal input” per model. These overrides apply even if the provider metadata doesn’t advertise the capability.
+ - **SvelteKit 2 + Svelte 5 runes** (`$state`, `$derived`, `$effect`)
+ - **MongoDB** persistence with `MongoMemoryServer` fallback
+ - **TailwindCSS** with `scrollbar-custom` and Tailwind class sorting
+ - **OpenAI-compatible** model registry pulled from `${OPENAI_BASE_URL}/models`
+ - **MCP bridge** in `src/lib/server/mcp/`; each server group exposes its own SSE endpoint
+ - **WASM tool gallery** via Web Worker (`src/lib/wasm/wasm.worker.ts`), opt-in via `?worker=1`
+ - **Parallel tool calls** — `Promise.all` in `src/lib/server/tools/toolInvocation.ts`
+ - **Capabilities modal** — `src/lib/components/RufloHelpModal.svelte`
+ - **Dynamic follow-ups** — tool-call-aware suggested next prompts in `ChatWindow.svelte`

- ## Building
+ For deeper internals, see [`CLAUDE.md`](./CLAUDE.md) (Claude Code agent guide) and [ADR-033](../../docs/adr/ADR-033-RUVOCAL-WASM-MCP-INTEGRATION.md).

- To create a production version of your app:
+ ## Related

- ```bash
- npm run build
- ```
+ - 🏠 **Parent project** — [RuFlo](https://github.com/ruvnet/ruflo)
+ - 🎯 **Goal Planner UI** — [goal.ruv.io](https://goal.ruv.io/) · [/agents](https://goal.ruv.io/agents)
+ - 📖 **ADR-033** — RuVocal/WASM-MCP integration architecture
+ - 📋 **Roadmap** — [issue #1689](https://github.com/ruvnet/ruflo/issues/1689)
+ - 🍴 **Upstream** — [huggingface/chat-ui](https://github.com/huggingface/chat-ui) (RuVocal is forked from v0.20.0)

- You can preview the production build with `npm run preview`.
+ ## License

- > To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment.
+ MIT, same as the parent [RuFlo](https://github.com/ruvnet/ruflo) project.
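The parallel tool execution the new README advertises comes down to dispatching every tool call from a single model turn at once and handing the settled results back to the model together. A minimal sketch of that pattern in plain JavaScript (the `toolCalls` shape and the `callMcpTool` helper are illustrative assumptions, not RuVocal's actual API):

```js
// Hypothetical sketch: run all tool calls from one model turn concurrently.
// `toolCalls` is assumed to be [{ id, name, arguments }, ...] and
// `callMcpTool(name, args)` an async function that talks to an MCP server.
async function runToolCallsInParallel(toolCalls, callMcpTool) {
  const results = await Promise.all(
    toolCalls.map(async (call) => {
      try {
        const result = await callMcpTool(call.name, call.arguments);
        return { id: call.id, ok: true, result };
      } catch (error) {
        // One failing tool should not sink the whole turn.
        return { id: call.id, ok: false, error: String(error) };
      }
    })
  );
  return results; // fed back to the model as tool-result messages
}
```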
package/src/ruvocal/cloudbuild.yaml ADDED
@@ -0,0 +1,68 @@
+ steps:
+   # Build the WASM-integrated ruvocal image with embedded MongoDB.
+   # INCLUDE_DB=true triggers the multi-stage Dockerfile path that copies
+   # mongod from mongo:7 and sets MONGODB_URL=mongodb://localhost:27017.
+   #
+   # The Dockerfile uses BuildKit syntax (COPY --link, --mount=type=cache),
+   # so DOCKER_BUILDKIT=1 must be set in the env. Without it, the parser
+   # rejects --link with "Unknown flag" before any build steps run.
+   - name: 'gcr.io/cloud-builders/docker'
+     env:
+       - 'DOCKER_BUILDKIT=1'
+     args: [
+       'build',
+       '--build-arg', 'INCLUDE_DB=true',
+       '-t', 'gcr.io/${PROJECT_ID}/ruvocal:${_VERSION}',
+       '-f', 'Dockerfile',
+       '.'
+     ]
+
+   - name: 'gcr.io/cloud-builders/docker'
+     args: ['push', 'gcr.io/${PROJECT_ID}/ruvocal:${_VERSION}']
+
+   - name: 'gcr.io/cloud-builders/docker'
+     args: [
+       'tag',
+       'gcr.io/${PROJECT_ID}/ruvocal:${_VERSION}',
+       'gcr.io/${PROJECT_ID}/ruvocal:latest'
+     ]
+   - name: 'gcr.io/cloud-builders/docker'
+     args: ['push', 'gcr.io/${PROJECT_ID}/ruvocal:latest']
+
+   # Deploy to Cloud Run with embedded MongoDB.
+   #
+   # NOTE: MongoDB data is ephemeral — Cloud Run's container filesystem is
+   # discarded on each cold start. This is acceptable for validation but
+   # NOT for production. For persistence, follow up with either Atlas or
+   # a Cloud Run multi-container revision (see ADR-033).
+   #
+   # Provider keys come from existing ruv-dev Secret Manager secrets per
+   # ADR-029. Only one CPU is allocated; mongo is lightweight when used
+   # only for chat session storage.
+   # Deploy with image only — preserves manually configured env vars
+   # (DOTENV_LOCAL with PUBLIC_ORIGIN/OPENAI_BASE_URL/MODELS, OPENAI_API_KEY
+   # secret mapping, etc.). Initial env config must be set out-of-band via
+   # `gcloud run services update` (see ADR-033 deployment notes).
+   - name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
+     entrypoint: gcloud
+     args: [
+       'run', 'deploy', 'ruvocal',
+       '--image', 'gcr.io/${PROJECT_ID}/ruvocal:${_VERSION}',
+       '--platform', 'managed',
+       '--region', 'us-central1',
+       '--port', '3000',
+       '--memory', '2Gi',
+       '--cpu', '2',
+       '--min-instances', '0',
+       '--max-instances', '3',
+       '--timeout', '300',
+       '--allow-unauthenticated'
+     ]
+
+ substitutions:
+   _VERSION: 'v1'
+
+ options:
+   logging: CLOUD_LOGGING_ONLY
+   machineType: 'E2_HIGHCPU_8'
+ timeout: 1800s
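With `_VERSION` defaulting to `v1`, a manual run of this pipeline would presumably look like `gcloud builds submit --config cloudbuild.yaml --substitutions=_VERSION=v2 .` from the ruvocal directory; the diff itself doesn't show how the build is triggered, so treat that invocation as an assumption.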
package/src/ruvocal/config/branding.env.example ADDED
@@ -0,0 +1,19 @@
+ # RuVector Branding Configuration
+ # Copy this to .env.local or add to your environment
+
+ # App name displayed throughout the UI
+ PUBLIC_APP_NAME=RuVector
+
+ # App description for SEO and meta tags
+ PUBLIC_APP_DESCRIPTION="AI-powered intelligent assistant with MCP tools, voice, multi-model support, and workflow automation. Connect to collective intelligence via RuVector."
+
+ # Assets folder (defaults to "chatui" for RuVector styling)
+ PUBLIC_APP_ASSETS=chatui
+
+ # Optional: Set the public origin for absolute URLs
+ # PUBLIC_ORIGIN=https://your-domain.com
+
+ # Theme colors (configured via CSS, not env vars)
+ # Primary gold: #e8a634
+ # Background dark: #020205
+ # See tailwind.config.cjs for full color palette
package/src/ruvocal/mcp-bridge/index.js CHANGED
@@ -213,12 +213,17 @@ class StdioMcpClient {
        const msg = JSON.stringify({ jsonrpc: "2.0", id, method, params }) + "\n";
        this.pending.set(id, { resolve, reject });
        this.process.stdin.write(msg);
+       // initialize is the cold-start gate for backends like ruflo/ruvector
+       // which boot a full claude-flow / ruvector kernel — on Cloud Run with
+       // npx fetching artifacts it can take 45-60s. Other RPC methods are
+       // post-init and stay snappy.
+       const timeoutMs = method === "initialize" ? 120000 : 30000;
        setTimeout(() => {
          if (this.pending.has(id)) {
            this.pending.delete(id);
            reject(new Error(`${this.name} timeout for ${method}`));
          }
-       }, 30000);
+       }, timeoutMs);
      });
    }

@@ -648,6 +653,15 @@ async function geminiGroundedSearch(query, mode = "search") {
    const apiKey = process.env.GOOGLE_API_KEY;
    if (!apiKey) return { error: "No GOOGLE_API_KEY configured for search" };

+   // Empty/missing query produces a 400 from Gemini's generateContent endpoint.
+   // Return a structured error so the model can recover with a real query.
+   if (!query || typeof query !== "string" || !query.trim()) {
+     return {
+       error: "search requires a non-empty query string",
+       hint: "Call this tool again with { query: 'your search terms' }. For comparisons use { action: 'compare', query: 'item A vs item B' }; for fact-checking use { action: 'fact_check', claim: 'the claim text' }.",
+     };
+   }
+
    const model = "gemini-2.5-flash";
    const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`;
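For context, the new guard sits in front of a request shaped roughly like the following sketch, based on the URL the function builds; the real bridge also attaches its grounded-search tool configuration, which this diff doesn't show:

```js
// Minimal sketch of a generateContent call using the URL shape from the diff.
// Assumes GOOGLE_API_KEY is set; only the basic request body is shown.
const model = "gemini-2.5-flash";
const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${process.env.GOOGLE_API_KEY}`;

const res = await fetch(url, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ contents: [{ parts: [{ text: "your search terms" }] }] }),
});
// An empty text string here is what triggers the 400 the new guard prevents.
if (!res.ok) console.error(`Gemini returned ${res.status}`);
```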