@jackchen_me/open-multi-agent 0.2.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/README.md +87 -20
  2. package/dist/agent/agent.d.ts +15 -1
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/agent.js +144 -10
  5. package/dist/agent/agent.js.map +1 -1
  6. package/dist/agent/loop-detector.d.ts +39 -0
  7. package/dist/agent/loop-detector.d.ts.map +1 -0
  8. package/dist/agent/loop-detector.js +122 -0
  9. package/dist/agent/loop-detector.js.map +1 -0
  10. package/dist/agent/pool.d.ts +2 -1
  11. package/dist/agent/pool.d.ts.map +1 -1
  12. package/dist/agent/pool.js +4 -2
  13. package/dist/agent/pool.js.map +1 -1
  14. package/dist/agent/runner.d.ts +23 -1
  15. package/dist/agent/runner.d.ts.map +1 -1
  16. package/dist/agent/runner.js +113 -12
  17. package/dist/agent/runner.js.map +1 -1
  18. package/dist/index.d.ts +3 -1
  19. package/dist/index.d.ts.map +1 -1
  20. package/dist/index.js +2 -0
  21. package/dist/index.js.map +1 -1
  22. package/dist/llm/adapter.d.ts +4 -1
  23. package/dist/llm/adapter.d.ts.map +1 -1
  24. package/dist/llm/adapter.js +11 -0
  25. package/dist/llm/adapter.js.map +1 -1
  26. package/dist/llm/copilot.d.ts.map +1 -1
  27. package/dist/llm/copilot.js +2 -1
  28. package/dist/llm/copilot.js.map +1 -1
  29. package/dist/llm/gemini.d.ts +65 -0
  30. package/dist/llm/gemini.d.ts.map +1 -0
  31. package/dist/llm/gemini.js +317 -0
  32. package/dist/llm/gemini.js.map +1 -0
  33. package/dist/llm/grok.d.ts +21 -0
  34. package/dist/llm/grok.d.ts.map +1 -0
  35. package/dist/llm/grok.js +24 -0
  36. package/dist/llm/grok.js.map +1 -0
  37. package/dist/llm/openai-common.d.ts +8 -1
  38. package/dist/llm/openai-common.d.ts.map +1 -1
  39. package/dist/llm/openai-common.js +35 -2
  40. package/dist/llm/openai-common.js.map +1 -1
  41. package/dist/llm/openai.d.ts +1 -1
  42. package/dist/llm/openai.d.ts.map +1 -1
  43. package/dist/llm/openai.js +20 -2
  44. package/dist/llm/openai.js.map +1 -1
  45. package/dist/orchestrator/orchestrator.d.ts.map +1 -1
  46. package/dist/orchestrator/orchestrator.js +89 -9
  47. package/dist/orchestrator/orchestrator.js.map +1 -1
  48. package/dist/task/queue.d.ts +31 -2
  49. package/dist/task/queue.d.ts.map +1 -1
  50. package/dist/task/queue.js +69 -2
  51. package/dist/task/queue.js.map +1 -1
  52. package/dist/tool/text-tool-extractor.d.ts +32 -0
  53. package/dist/tool/text-tool-extractor.d.ts.map +1 -0
  54. package/dist/tool/text-tool-extractor.js +187 -0
  55. package/dist/tool/text-tool-extractor.js.map +1 -0
  56. package/dist/types.d.ts +139 -7
  57. package/dist/types.d.ts.map +1 -1
  58. package/dist/utils/trace.d.ts +12 -0
  59. package/dist/utils/trace.d.ts.map +1 -0
  60. package/dist/utils/trace.js +30 -0
  61. package/dist/utils/trace.js.map +1 -0
  62. package/package.json +18 -2
  63. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  64. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  65. package/.github/pull_request_template.md +0 -14
  66. package/.github/workflows/ci.yml +0 -23
  67. package/CLAUDE.md +0 -72
  68. package/CODE_OF_CONDUCT.md +0 -48
  69. package/CONTRIBUTING.md +0 -72
  70. package/DECISIONS.md +0 -43
  71. package/README_zh.md +0 -217
  72. package/SECURITY.md +0 -17
  73. package/examples/01-single-agent.ts +0 -131
  74. package/examples/02-team-collaboration.ts +0 -167
  75. package/examples/03-task-pipeline.ts +0 -201
  76. package/examples/04-multi-model-team.ts +0 -261
  77. package/examples/05-copilot-test.ts +0 -49
  78. package/examples/06-local-model.ts +0 -199
  79. package/examples/07-fan-out-aggregate.ts +0 -209
  80. package/examples/08-gemma4-local.ts +0 -203
  81. package/examples/09-gemma4-auto-orchestration.ts +0 -162
  82. package/src/agent/agent.ts +0 -473
  83. package/src/agent/pool.ts +0 -278
  84. package/src/agent/runner.ts +0 -413
  85. package/src/agent/structured-output.ts +0 -126
  86. package/src/index.ts +0 -167
  87. package/src/llm/adapter.ts +0 -87
  88. package/src/llm/anthropic.ts +0 -389
  89. package/src/llm/copilot.ts +0 -551
  90. package/src/llm/openai-common.ts +0 -255
  91. package/src/llm/openai.ts +0 -272
  92. package/src/memory/shared.ts +0 -181
  93. package/src/memory/store.ts +0 -124
  94. package/src/orchestrator/orchestrator.ts +0 -977
  95. package/src/orchestrator/scheduler.ts +0 -352
  96. package/src/task/queue.ts +0 -394
  97. package/src/task/task.ts +0 -239
  98. package/src/team/messaging.ts +0 -232
  99. package/src/team/team.ts +0 -334
  100. package/src/tool/built-in/bash.ts +0 -187
  101. package/src/tool/built-in/file-edit.ts +0 -154
  102. package/src/tool/built-in/file-read.ts +0 -105
  103. package/src/tool/built-in/file-write.ts +0 -81
  104. package/src/tool/built-in/grep.ts +0 -362
  105. package/src/tool/built-in/index.ts +0 -50
  106. package/src/tool/executor.ts +0 -178
  107. package/src/tool/framework.ts +0 -557
  108. package/src/types.ts +0 -391
  109. package/src/utils/semaphore.ts +0 -89
  110. package/tests/semaphore.test.ts +0 -57
  111. package/tests/shared-memory.test.ts +0 -122
  112. package/tests/structured-output.test.ts +0 -331
  113. package/tests/task-queue.test.ts +0 -244
  114. package/tests/task-retry.test.ts +0 -368
  115. package/tests/task-utils.test.ts +0 -155
  116. package/tests/tool-executor.test.ts +0 -193
  117. package/tsconfig.json +0 -25
@@ -0,0 +1,30 @@
1
+ /**
2
+ * @fileoverview Trace emission utilities for the observability layer.
3
+ */
4
+ import { randomUUID } from 'node:crypto';
5
+ /**
6
+ * Safely emit a trace event. Swallows callback errors so a broken
7
+ * subscriber never crashes agent execution.
8
+ */
9
+ export function emitTrace(fn, event) {
10
+ if (!fn)
11
+ return;
12
+ try {
13
+ // Guard async callbacks: if fn returns a Promise, swallow its rejection
14
+ // so an async onTrace never produces an unhandled promise rejection.
15
+ const result = fn(event);
16
+ if (result && typeof result.catch === 'function') {
17
+ ;
18
+ result.catch(noop);
19
+ }
20
+ }
21
+ catch {
22
+ // Intentionally swallowed — observability must never break execution.
23
+ }
24
+ }
25
+ function noop() { }
26
+ /** Generate a unique run ID for trace correlation. */
27
+ export function generateRunId() {
28
+ return randomUUID();
29
+ }
30
+ //# sourceMappingURL=trace.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"trace.js","sourceRoot":"","sources":["../../src/utils/trace.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,EAAE,UAAU,EAAE,MAAM,aAAa,CAAA;AAGxC;;;GAGG;AACH,MAAM,UAAU,SAAS,CACvB,EAA6D,EAC7D,KAAiB;IAEjB,IAAI,CAAC,EAAE;QAAE,OAAM;IACf,IAAI,CAAC;QACH,wEAAwE;QACxE,qEAAqE;QACrE,MAAM,MAAM,GAAG,EAAE,CAAC,KAAK,CAAY,CAAA;QACnC,IAAI,MAAM,IAAI,OAAQ,MAA2B,CAAC,KAAK,KAAK,UAAU,EAAE,CAAC;YACvE,CAAC;YAAC,MAA2B,CAAC,KAAK,CAAC,IAAI,CAAC,CAAA;QAC3C,CAAC;IACH,CAAC;IAAC,MAAM,CAAC;QACP,sEAAsE;IACxE,CAAC;AACH,CAAC;AAED,SAAS,IAAI,KAAI,CAAC;AAElB,sDAAsD;AACtD,MAAM,UAAU,aAAa;IAC3B,OAAO,UAAU,EAAE,CAAA;AACrB,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,12 @@
1
1
  {
2
2
  "name": "@jackchen_me/open-multi-agent",
3
- "version": "0.2.0",
4
- "description": "Production-grade multi-agent orchestration framework. Model-agnostic, supports team collaboration, task scheduling, and inter-agent communication.",
3
+ "version": "1.0.1",
4
+ "description": "TypeScript multi-agent framework — one runTeam() call from goal to result. Auto task decomposition, parallel execution. 3 dependencies, deploys anywhere Node.js runs.",
5
+ "files": [
6
+ "dist",
7
+ "README.md",
8
+ "LICENSE"
9
+ ],
5
10
  "type": "module",
6
11
  "main": "dist/index.js",
7
12
  "types": "dist/index.d.ts",
@@ -17,6 +22,7 @@
17
22
  "test": "vitest run",
18
23
  "test:watch": "vitest",
19
24
  "lint": "tsc --noEmit",
25
+ "test:e2e": "RUN_E2E=1 vitest run tests/e2e/",
20
26
  "prepublishOnly": "npm run build"
21
27
  },
22
28
  "keywords": [
@@ -41,8 +47,18 @@
41
47
  "openai": "^4.73.0",
42
48
  "zod": "^3.23.0"
43
49
  },
50
+ "peerDependencies": {
51
+ "@google/genai": "^1.48.0"
52
+ },
53
+ "peerDependenciesMeta": {
54
+ "@google/genai": {
55
+ "optional": true
56
+ }
57
+ },
44
58
  "devDependencies": {
59
+ "@google/genai": "^1.48.0",
45
60
  "@types/node": "^22.0.0",
61
+ "@vitest/coverage-v8": "^2.1.9",
46
62
  "tsx": "^4.21.0",
47
63
  "typescript": "^5.6.0",
48
64
  "vitest": "^2.1.0"
@@ -1,40 +0,0 @@
1
- ---
2
- name: Bug Report
3
- about: Report a bug to help us improve
4
- title: "[Bug] "
5
- labels: bug
6
- assignees: ''
7
- ---
8
-
9
- ## Describe the bug
10
-
11
- A clear and concise description of what the bug is.
12
-
13
- ## To Reproduce
14
-
15
- Steps to reproduce the behavior:
16
-
17
- 1. Configure agent with '...'
18
- 2. Call `runTeam(...)` with '...'
19
- 3. See error
20
-
21
- ## Expected behavior
22
-
23
- A clear description of what you expected to happen.
24
-
25
- ## Error output
26
-
27
- ```
28
- Paste any error messages or logs here
29
- ```
30
-
31
- ## Environment
32
-
33
- - OS: [e.g. macOS 14, Ubuntu 22.04]
34
- - Node.js version: [e.g. 20.11]
35
- - Package version: [e.g. 0.1.0]
36
- - LLM provider: [e.g. Anthropic, OpenAI]
37
-
38
- ## Additional context
39
-
40
- Add any other context about the problem here.
@@ -1,23 +0,0 @@
1
- ---
2
- name: Feature Request
3
- about: Suggest an idea for this project
4
- title: "[Feature] "
5
- labels: enhancement
6
- assignees: ''
7
- ---
8
-
9
- ## Problem
10
-
11
- A clear description of the problem or limitation you're experiencing.
12
-
13
- ## Proposed Solution
14
-
15
- Describe what you'd like to happen.
16
-
17
- ## Alternatives Considered
18
-
19
- Any alternative solutions or features you've considered.
20
-
21
- ## Additional context
22
-
23
- Add any other context, code examples, or screenshots about the feature request here.
@@ -1,14 +0,0 @@
1
- ## What
2
-
3
- <!-- What does this PR do? One or two sentences. -->
4
-
5
- ## Why
6
-
7
- <!-- Why is this change needed? Link to an issue if applicable: Fixes #123 -->
8
-
9
- ## Checklist
10
-
11
- - [ ] `npm run lint` passes
12
- - [ ] `npm test` passes
13
- - [ ] Added/updated tests for changed behavior
14
- - [ ] No new runtime dependencies (or justified in the PR description)
@@ -1,23 +0,0 @@
1
- name: CI
2
-
3
- on:
4
- push:
5
- branches: [main]
6
- pull_request:
7
- branches: [main]
8
-
9
- jobs:
10
- test:
11
- runs-on: ubuntu-latest
12
- strategy:
13
- matrix:
14
- node-version: [18, 20, 22]
15
- steps:
16
- - uses: actions/checkout@v4
17
- - uses: actions/setup-node@v4
18
- with:
19
- node-version: ${{ matrix.node-version }}
20
- cache: npm
21
- - run: npm ci
22
- - run: npm run lint
23
- - run: npm test
package/CLAUDE.md DELETED
@@ -1,72 +0,0 @@
1
- # CLAUDE.md
2
-
3
- This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4
-
5
- ## Commands
6
-
7
- ```bash
8
- npm run build # Compile TypeScript (src/ → dist/)
9
- npm run dev # Watch mode compilation
10
- npm run lint # Type-check only (tsc --noEmit)
11
- npm test # Run all tests (vitest run)
12
- npm run test:watch # Vitest watch mode
13
- ```
14
-
15
- No test files exist yet in `tests/`. Examples in `examples/` are standalone scripts requiring API keys (`ANTHROPIC_API_KEY`, `OPENAI_API_KEY`).
16
-
17
- ## Architecture
18
-
19
- ES module TypeScript framework for multi-agent orchestration. Three runtime dependencies: `@anthropic-ai/sdk`, `openai`, `zod`.
20
-
21
- ### Core Execution Flow
22
-
23
- **`OpenMultiAgent`** (`src/orchestrator/orchestrator.ts`) is the top-level public API with three execution modes:
24
-
25
- 1. **`runAgent(config, prompt)`** — single agent, one-shot
26
- 2. **`runTeam(team, goal)`** — automatic orchestration: a temporary "coordinator" agent decomposes the goal into a task DAG via LLM call, then tasks execute in dependency order
27
- 3. **`runTasks(team, tasks)`** — explicit task pipeline with user-defined dependencies
28
-
29
- ### The Coordinator Pattern (runTeam)
30
-
31
- This is the framework's key feature. When `runTeam()` is called:
32
- 1. A coordinator agent receives the goal + agent roster and produces a JSON task array (title, description, assignee, dependsOn)
33
- 2. `TaskQueue` resolves dependencies topologically — independent tasks run in parallel, dependent tasks wait
34
- 3. `Scheduler` auto-assigns any unassigned tasks (strategies: `dependency-first` default, `round-robin`, `least-busy`, `capability-match`)
35
- 4. Each task result is written to `SharedMemory` so subsequent agents see prior results
36
- 5. The coordinator synthesizes all task results into a final output
37
-
38
- ### Layer Map
39
-
40
- | Layer | Files | Responsibility |
41
- |-------|-------|----------------|
42
- | Orchestrator | `orchestrator/orchestrator.ts`, `orchestrator/scheduler.ts` | Top-level API, task decomposition, coordinator pattern |
43
- | Team | `team/team.ts`, `team/messaging.ts` | Agent roster, MessageBus (point-to-point + broadcast), SharedMemory binding |
44
- | Agent | `agent/agent.ts`, `agent/runner.ts`, `agent/pool.ts` | Agent lifecycle (idle→running→completed/error), conversation loop, concurrency pool with Semaphore |
45
- | Task | `task/queue.ts`, `task/task.ts` | Dependency-aware queue, auto-unblock on completion, cascade failure to dependents |
46
- | Tool | `tool/framework.ts`, `tool/executor.ts`, `tool/built-in/` | `defineTool()` with Zod schemas, ToolRegistry, parallel batch execution with concurrency semaphore |
47
- | LLM | `llm/adapter.ts`, `llm/anthropic.ts`, `llm/openai.ts` | `LLMAdapter` interface (`chat` + `stream`), factory `createAdapter()` |
48
- | Memory | `memory/shared.ts`, `memory/store.ts` | Namespaced key-value store (`agentName/key`), markdown summary injection into prompts |
49
- | Types | `types.ts` | All interfaces in one file to avoid circular deps |
50
- | Exports | `index.ts` | Public API surface |
51
-
52
- ### Agent Conversation Loop (AgentRunner)
53
-
54
- `AgentRunner.run()`: send messages → extract tool-use blocks → execute tools in parallel batch → append results → loop until `end_turn` or `maxTurns` exhausted. Accumulates `TokenUsage` across all turns.
55
-
56
- ### Concurrency Control
57
-
58
- Two independent semaphores: `AgentPool` (max concurrent agent runs, default 5) and `ToolExecutor` (max concurrent tool calls, default 4).
59
-
60
- ### Error Handling
61
-
62
- - Tool errors → caught, returned as `ToolResult(isError: true)`, never thrown
63
- - Task failures → cascade to all dependents; independent tasks continue
64
- - LLM API errors → propagate to caller
65
-
66
- ### Built-in Tools
67
-
68
- `bash`, `file_read`, `file_write`, `file_edit`, `grep` — registered via `registerBuiltInTools(registry)`.
69
-
70
- ### Adding an LLM Adapter
71
-
72
- Implement `LLMAdapter` interface with `chat(messages, options)` and `stream(messages, options)`, then register in `createAdapter()` factory in `src/llm/adapter.ts`.
@@ -1,48 +0,0 @@
1
- # Contributor Covenant Code of Conduct
2
-
3
- ## Our Pledge
4
-
5
- We as members, contributors, and leaders pledge to make participation in our
6
- community a positive experience for everyone, regardless of background or
7
- identity.
8
-
9
- ## Our Standards
10
-
11
- Examples of behavior that contributes to a positive environment:
12
-
13
- - Using welcoming and inclusive language
14
- - Being respectful of differing viewpoints and experiences
15
- - Gracefully accepting constructive feedback
16
- - Focusing on what is best for the community
17
- - Showing empathy towards other community members
18
-
19
- Examples of unacceptable behavior:
20
-
21
- - Trolling, insulting or derogatory comments, and personal attacks
22
- - Public or private unwelcome conduct
23
- - Publishing others' private information without explicit permission
24
- - Other conduct which could reasonably be considered inappropriate in a
25
- professional setting
26
-
27
- ## Enforcement Responsibilities
28
-
29
- Community leaders are responsible for clarifying and enforcing our standards of
30
- acceptable behavior and will take appropriate and fair corrective action in
31
- response to any behavior that they deem inappropriate or harmful.
32
-
33
- ## Scope
34
-
35
- This Code of Conduct applies within all community spaces, and also applies when
36
- an individual is officially representing the community in public spaces.
37
-
38
- ## Enforcement
39
-
40
- Instances of unacceptable behavior may be reported to the community leaders
41
- responsible for enforcement at **jack@yuanasi.com**. All complaints will be
42
- reviewed and investigated promptly and fairly.
43
-
44
- ## Attribution
45
-
46
- This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org),
47
- version 2.1, available at
48
- [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html).
package/CONTRIBUTING.md DELETED
@@ -1,72 +0,0 @@
1
- # Contributing
2
-
3
- Thanks for your interest in contributing to Open Multi-Agent! This guide covers the basics to get you started.
4
-
5
- ## Setup
6
-
7
- ```bash
8
- git clone https://github.com/JackChen-me/open-multi-agent.git
9
- cd open-multi-agent
10
- npm install
11
- ```
12
-
13
- Requires Node.js >= 18.
14
-
15
- ## Development Commands
16
-
17
- ```bash
18
- npm run build # Compile TypeScript (src/ → dist/)
19
- npm run dev # Watch mode compilation
20
- npm run lint # Type-check (tsc --noEmit)
21
- npm test # Run all tests (vitest)
22
- npm run test:watch # Vitest watch mode
23
- ```
24
-
25
- ## Running Tests
26
-
27
- All tests live in `tests/`. They test core modules (TaskQueue, SharedMemory, ToolExecutor, Semaphore) without requiring API keys or network access.
28
-
29
- ```bash
30
- npm test
31
- ```
32
-
33
- Every PR must pass `npm run lint && npm test`. CI runs both automatically on Node 18, 20, and 22.
34
-
35
- ## Making a Pull Request
36
-
37
- 1. Fork the repo and create a branch from `main`
38
- 2. Make your changes
39
- 3. Add or update tests if you changed behavior
40
- 4. Run `npm run lint && npm test` locally
41
- 5. Open a PR against `main`
42
-
43
- ### PR Checklist
44
-
45
- - [ ] `npm run lint` passes
46
- - [ ] `npm test` passes
47
- - [ ] New behavior has test coverage
48
- - [ ] Linked to a relevant issue (if one exists)
49
-
50
- ## Code Style
51
-
52
- - TypeScript strict mode, ES modules (`.js` extensions in imports)
53
- - No additional linter/formatter configured — follow existing patterns
54
- - Keep dependencies minimal (currently 3 runtime deps: `@anthropic-ai/sdk`, `openai`, `zod`)
55
-
56
- ## Architecture Overview
57
-
58
- See the [README](./README.md#architecture) for an architecture diagram. Key entry points:
59
-
60
- - **Orchestrator**: `src/orchestrator/orchestrator.ts` — top-level API
61
- - **Task system**: `src/task/queue.ts`, `src/task/task.ts` — dependency DAG
62
- - **Agent**: `src/agent/runner.ts` — conversation loop
63
- - **Tools**: `src/tool/framework.ts`, `src/tool/executor.ts` — tool registry and execution
64
- - **LLM adapters**: `src/llm/` — Anthropic, OpenAI, Copilot
65
-
66
- ## Where to Contribute
67
-
68
- Check the [issues](https://github.com/JackChen-me/open-multi-agent/issues) page. Issues labeled `good first issue` are scoped and approachable. Issues labeled `help wanted` are larger but well-defined.
69
-
70
- ## License
71
-
72
- By contributing, you agree that your contributions will be licensed under the MIT License.
package/DECISIONS.md DELETED
@@ -1,43 +0,0 @@
1
- # Architecture Decisions
2
-
3
- This document records deliberate "won't do" decisions for the project. These are features we evaluated and chose NOT to implement — not because they're bad ideas, but because they conflict with our positioning as the **simplest multi-agent framework**.
4
-
5
- If you're considering a PR in any of these areas, please open a discussion first.
6
-
7
- ## Won't Do
8
-
9
- ### 1. Agent Handoffs
10
-
11
- **What**: Agent A transfers an in-progress conversation to Agent B (like OpenAI Agents SDK `handoff()`).
12
-
13
- **Why not**: Handoffs are a different paradigm from our task-based model. Our tasks have clear boundaries — one agent, one task, one result. Handoffs blur those boundaries and add state-transfer complexity. Users who need handoffs likely need a different framework (OpenAI Agents SDK is purpose-built for this).
14
-
15
- ### 2. State Persistence / Checkpointing
16
-
17
- **What**: Save workflow state to a database so long-running workflows can resume after crashes (like LangGraph checkpointing).
18
-
19
- **Why not**: Requires a storage backend (SQLite, Redis, Postgres), schema migrations, and serialization logic. This is enterprise infrastructure — it triples the complexity surface. Our target users run workflows that complete in seconds to minutes, not hours. If you need checkpointing, LangGraph is the right tool.
20
-
21
- **Related**: Closing #20 with this rationale.
22
-
23
- ### 3. A2A Protocol (Agent-to-Agent)
24
-
25
- **What**: Google's open protocol for agents on different servers to discover and communicate with each other.
26
-
27
- **Why not**: Too early — the spec is still evolving and adoption is minimal. Our users run agents in a single process, not across distributed services. If A2A matures and there's real demand, we can revisit. Today it would add complexity for zero practical benefit.
28
-
29
- ### 4. MCP Integration (Model Context Protocol)
30
-
31
- **What**: Anthropic's protocol for connecting LLMs to external tools and data sources.
32
-
33
- **Why not**: MCP is valuable but targets a different layer. Our `defineTool()` API already lets users wrap any external service as a tool in ~10 lines of code. Adding MCP would mean maintaining protocol compatibility, transport layers, and tool discovery — complexity that serves tool platform builders, not our target users who just want to run agent teams.
34
-
35
- ### 5. Dashboard / Visualization
36
-
37
- **What**: Built-in web UI to visualize task DAGs, agent activity, and token usage.
38
-
39
- **Why not**: We expose data, we don't build UI. The `onProgress` callback and upcoming `onTrace` (#18) give users all the raw data. They can pipe it into Grafana, build a custom dashboard, or use console logs. Shipping a web UI means owning a frontend stack, which is outside our scope.
40
-
41
- ---
42
-
43
- *Last updated: 2026-04-03*
package/README_zh.md DELETED
@@ -1,217 +0,0 @@
1
- # Open Multi-Agent
2
-
3
- 构建能自动拆解目标的 AI 智能体团队。定义智能体的角色和工具,描述一个目标——框架自动规划任务图、调度依赖、并行执行。
4
-
5
- 3 个运行时依赖,27 个源文件,一次 `runTeam()` 调用从目标到结果。
6
-
7
- [![GitHub stars](https://img.shields.io/github/stars/JackChen-me/open-multi-agent)](https://github.com/JackChen-me/open-multi-agent/stargazers)
8
- [![license](https://img.shields.io/github/license/JackChen-me/open-multi-agent)](./LICENSE)
9
- [![TypeScript](https://img.shields.io/badge/TypeScript-5.6-blue)](https://www.typescriptlang.org/)
10
-
11
- [English](./README.md) | **中文**
12
-
13
- ## 为什么选择 Open Multi-Agent?
14
-
15
- - **自动任务拆解** — 用自然语言描述目标,内置的协调者智能体自动将其拆解为带依赖关系和分配的任务图——无需手动编排。
16
- - **多智能体团队** — 定义不同角色、工具甚至不同模型的智能体。它们通过消息总线和共享内存协作。
17
- - **任务 DAG 调度** — 任务之间存在依赖关系。框架进行拓扑排序——有依赖的任务等待,无依赖的任务并行执行。
18
- - **模型无关** — Claude、GPT、Gemma 4 和本地模型(Ollama、vLLM、LM Studio)可以在同一个团队中使用。通过 `baseURL` 即可接入任何 OpenAI 兼容服务。
19
- - **结构化输出** — 为任意智能体添加 `outputSchema`(Zod),输出自动解析为 JSON 并校验,校验失败自动重试一次。通过 `result.structured` 获取类型化结果。
20
- - **任务重试** — 为任务设置 `maxRetries`,失败时自动指数退避重试。所有尝试的 token 用量累计,确保计费准确。
21
- - **进程内执行** — 没有子进程开销。所有内容在一个 Node.js 进程中运行。可部署到 Serverless、Docker、CI/CD。
22
-
23
- ## 快速开始
24
-
25
- 需要 Node.js >= 18。
26
-
27
- ```bash
28
- npm install @jackchen_me/open-multi-agent
29
- ```
30
-
31
- 在环境变量中设置 `ANTHROPIC_API_KEY`(以及可选的 `OPENAI_API_KEY` 或用于 Copilot 的 `GITHUB_TOKEN`)。通过 Ollama 使用本地模型无需 API key — 参见 [example 06](examples/06-local-model.ts)。
32
-
33
- 三个智能体,一个目标——框架处理剩下的一切:
34
-
35
- ```typescript
36
- import { OpenMultiAgent } from '@jackchen_me/open-multi-agent'
37
- import type { AgentConfig } from '@jackchen_me/open-multi-agent'
38
-
39
- const architect: AgentConfig = {
40
- name: 'architect',
41
- model: 'claude-sonnet-4-6',
42
- systemPrompt: 'You design clean API contracts and file structures.',
43
- tools: ['file_write'],
44
- }
45
-
46
- const developer: AgentConfig = {
47
- name: 'developer',
48
- model: 'claude-sonnet-4-6',
49
- systemPrompt: 'You implement what the architect designs.',
50
- tools: ['bash', 'file_read', 'file_write', 'file_edit'],
51
- }
52
-
53
- const reviewer: AgentConfig = {
54
- name: 'reviewer',
55
- model: 'claude-sonnet-4-6',
56
- systemPrompt: 'You review code for correctness and clarity.',
57
- tools: ['file_read', 'grep'],
58
- }
59
-
60
- const orchestrator = new OpenMultiAgent({
61
- defaultModel: 'claude-sonnet-4-6',
62
- onProgress: (event) => console.log(event.type, event.agent ?? event.task ?? ''),
63
- })
64
-
65
- const team = orchestrator.createTeam('api-team', {
66
- name: 'api-team',
67
- agents: [architect, developer, reviewer],
68
- sharedMemory: true,
69
- })
70
-
71
- // 描述一个目标——框架将其拆解为任务并编排执行
72
- const result = await orchestrator.runTeam(team, 'Create a REST API for a todo list in /tmp/todo-api/')
73
-
74
- console.log(`成功: ${result.success}`)
75
- console.log(`Token 用量: ${result.totalTokenUsage.output_tokens} output tokens`)
76
- ```
77
-
78
- 执行过程:
79
-
80
- ```
81
- agent_start coordinator
82
- task_start architect
83
- task_complete architect
84
- task_start developer
85
- task_start developer // 无依赖的任务并行执行
86
- task_complete developer
87
- task_start reviewer // 实现完成后自动解锁
88
- task_complete developer
89
- task_complete reviewer
90
- agent_complete coordinator // 综合所有结果
91
- Success: true
92
- Tokens: 12847 output tokens
93
- ```
94
-
95
- ## 作者
96
-
97
- > JackChen — 前 WPS 产品经理,现独立创业者。关注小红书[「杰克西|硅基杠杆」](https://www.xiaohongshu.com/user/profile/5a1bdc1e4eacab4aa39ea6d6),持续获取我的 AI Agent 观点和思考。
98
-
99
- ## 三种运行模式
100
-
101
- | 模式 | 方法 | 适用场景 |
102
- |------|------|----------|
103
- | 单智能体 | `runAgent()` | 一个智能体,一个提示词——最简入口 |
104
- | 自动编排团队 | `runTeam()` | 给一个目标,框架自动规划和执行 |
105
- | 显式任务管线 | `runTasks()` | 你自己定义任务图和分配 |
106
-
107
- ## 贡献者
108
-
109
- <a href="https://github.com/JackChen-me/open-multi-agent/graphs/contributors">
110
- <img src="https://contrib.rocks/image?repo=JackChen-me/open-multi-agent" />
111
- </a>
112
-
113
- ## 示例
114
-
115
- 所有示例都是可运行脚本,位于 [`examples/`](./examples/) 目录。使用 `npx tsx` 运行:
116
-
117
- ```bash
118
- npx tsx examples/01-single-agent.ts
119
- ```
120
-
121
- | 示例 | 展示内容 |
122
- |------|----------|
123
- | [01 — 单智能体](examples/01-single-agent.ts) | `runAgent()` 单次调用、`stream()` 流式输出、`prompt()` 多轮对话 |
124
- | [02 — 团队协作](examples/02-team-collaboration.ts) | `runTeam()` 自动编排 + 协调者模式 |
125
- | [03 — 任务流水线](examples/03-task-pipeline.ts) | `runTasks()` 显式依赖图(设计 → 实现 → 测试 + 评审) |
126
- | [04 — 多模型团队](examples/04-multi-model-team.ts) | `defineTool()` 自定义工具、Anthropic + OpenAI 混合、`AgentPool` |
127
- | [05 — Copilot](examples/05-copilot-test.ts) | GitHub Copilot 作为 LLM 提供者 |
128
- | [06 — 本地模型](examples/06-local-model.ts) | Ollama + Claude 混合流水线,通过 `baseURL` 接入(兼容 vLLM、LM Studio 等) |
129
- | [07 — 扇出聚合](examples/07-fan-out-aggregate.ts) | `runParallel()` MapReduce — 3 个分析师并行,然后综合 |
130
- | [08 — Gemma 4 本地](examples/08-gemma4-local.ts) | 纯本地 Gemma 4 智能体团队 + tool-calling — 零 API 费用 |
131
- | [09 — Gemma 4 自动编排](examples/09-gemma4-auto-orchestration.ts) | `runTeam()` 用 Gemma 4 当 coordinator — 自动任务拆解,完全本地 |
132
-
133
- ## 架构
134
-
135
- ```
136
- ┌─────────────────────────────────────────────────────────────────┐
137
- │ OpenMultiAgent (Orchestrator) │
138
- │ │
139
- │ createTeam() runTeam() runTasks() runAgent() getStatus() │
140
- └──────────────────────┬──────────────────────────────────────────┘
141
-
142
- ┌──────────▼──────────┐
143
- │ Team │
144
- │ - AgentConfig[] │
145
- │ - MessageBus │
146
- │ - TaskQueue │
147
- │ - SharedMemory │
148
- └──────────┬──────────┘
149
-
150
- ┌─────────────┴─────────────┐
151
- │ │
152
- ┌────────▼──────────┐ ┌───────────▼───────────┐
153
- │ AgentPool │ │ TaskQueue │
154
- │ - Semaphore │ │ - dependency graph │
155
- │ - runParallel() │ │ - auto unblock │
156
- └────────┬──────────┘ │ - cascade failure │
157
- │ └───────────────────────┘
158
- ┌────────▼──────────┐
159
- │ Agent │
160
- │ - run() │ ┌──────────────────────┐
161
- │ - prompt() │───►│ LLMAdapter │
162
- │ - stream() │ │ - AnthropicAdapter │
163
- └────────┬──────────┘ │ - OpenAIAdapter │
164
- │ │ - CopilotAdapter │
165
- │ └──────────────────────┘
166
- ┌────────▼──────────┐
167
- │ AgentRunner │ ┌──────────────────────┐
168
- │ - conversation │───►│ ToolRegistry │
169
- │ loop │ │ - defineTool() │
170
- │ - tool dispatch │ │ - 5 built-in tools │
171
- └───────────────────┘ └──────────────────────┘
172
- ```
173
-
174
- ## 内置工具
175
-
176
- | 工具 | 说明 |
177
- |------|------|
178
- | `bash` | 执行 Shell 命令。返回 stdout + stderr。支持超时和工作目录设置。 |
179
- | `file_read` | 读取指定绝对路径的文件内容。支持偏移量和行数限制以处理大文件。 |
180
- | `file_write` | 写入或创建文件。自动创建父目录。 |
181
- | `file_edit` | 通过精确字符串匹配编辑文件。 |
182
- | `grep` | 使用正则表达式搜索文件内容。优先使用 ripgrep,回退到 Node.js 实现。 |
183
-
184
- ## 支持的 Provider
185
-
186
- | Provider | 配置 | 环境变量 | 状态 |
187
- |----------|------|----------|------|
188
- | Anthropic (Claude) | `provider: 'anthropic'` | `ANTHROPIC_API_KEY` | 已验证 |
189
- | OpenAI (GPT) | `provider: 'openai'` | `OPENAI_API_KEY` | 已验证 |
190
- | GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | 已验证 |
191
- | Ollama / vLLM / LM Studio | `provider: 'openai'` + `baseURL` | — | 已验证 |
192
-
193
- 已验证支持 tool-calling 的本地模型:**Gemma 4**(见[示例 08](examples/08-gemma4-local.ts))。
194
-
195
- 任何 OpenAI 兼容 API 均可通过 `provider: 'openai'` + `baseURL` 接入(DeepSeek、Groq、Mistral、Qwen、MiniMax 等)。这些 Provider 尚未完整验证——欢迎通过 [#25](https://github.com/JackChen-me/open-multi-agent/issues/25) 贡献验证。
196
-
197
- ## 参与贡献
198
-
199
- 欢迎提 Issue、功能需求和 PR。以下方向的贡献尤其有价值:
200
-
201
- - **Provider 集成** — 验证并文档化 OpenAI 兼容 Provider(DeepSeek、Groq、Qwen、MiniMax 等)通过 `baseURL` 接入。详见 [#25](https://github.com/JackChen-me/open-multi-agent/issues/25)。对于非 OpenAI 兼容的 Provider(如 Gemini),欢迎贡献新的 `LLMAdapter` 实现——接口只需两个方法:`chat()` 和 `stream()`。
202
- - **示例** — 真实场景的工作流和用例。
203
- - **文档** — 指南、教程和 API 文档。
204
-
205
- ## Star 趋势
206
-
207
- <a href="https://star-history.com/#JackChen-me/open-multi-agent&Date">
208
- <picture>
209
- <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=JackChen-me/open-multi-agent&type=Date&theme=dark&v=20260403" />
210
- <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=JackChen-me/open-multi-agent&type=Date&v=20260403" />
211
- <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=JackChen-me/open-multi-agent&type=Date&v=20260403" />
212
- </picture>
213
- </a>
214
-
215
- ## 许可证
216
-
217
- MIT
package/SECURITY.md DELETED
@@ -1,17 +0,0 @@
1
- # Security Policy
2
-
3
- ## Supported Versions
4
-
5
- | Version | Supported |
6
- |---------|-----------|
7
- | latest | Yes |
8
-
9
- ## Reporting a Vulnerability
10
-
11
- If you discover a security vulnerability, please report it responsibly via email:
12
-
13
- **jack@yuanasi.com**
14
-
15
- Please do **not** open a public GitHub issue for security vulnerabilities.
16
-
17
- We will acknowledge receipt within 48 hours and aim to provide a fix or mitigation plan within 7 days.