marco-agent 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +141 -28
- package/dist/bin.js +0 -0
- package/dist/mcp.d.ts +1 -0
- package/dist/mcp.d.ts.map +1 -1
- package/dist/mcp.js +1 -1
- package/dist/mcp.js.map +1 -1
- package/package.json +10 -2
package/README.md
CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
# marco-agent
|
|
2
2
|
|
|
3
|
-
A simple, extensible AI agent built on [marco-harness](https://github.com/pyrotank41/MARCO). Works in CLI, server-side Node,
|
|
3
|
+
A simple, extensible AI agent built on [marco-harness](https://github.com/pyrotank41/MARCO). Works in CLI, server-side Node, Next.js, and Edge runtimes — with Anthropic, OpenAI, OpenRouter, DeepSeek, Together, Groq, vLLM, LM Studio, or any OpenAI-compatible endpoint.
|
|
4
4
|
|
|
5
|
-
`MarcoAgent` is a thin wrapper around the harness
|
|
5
|
+
`MarcoAgent` is a thin, composable wrapper around the harness. Sensible defaults out of the box, every knob exposed when you need it: streaming, multi-turn history, usage + cost tracking, per-turn budget guards, MCP-server-to-Tool bridge, opt-in compaction, and reasoning-content surfacing for chain-of-thought models.
|
|
6
6
|
|
|
7
|
-
> **Designing an integration?** See [`docs/architecture.md`](docs/architecture.md) for the library/app boundary, tool-
|
|
7
|
+
> **Designing an integration?** See [`docs/architecture.md`](docs/architecture.md) for the library/app boundary, tool-source framework, and the design decisions worth knowing. Per-feature deep dives: [`docs/providers.md`](docs/providers.md), [`docs/usage-tracking.md`](docs/usage-tracking.md), [`docs/mcp-bridge.md`](docs/mcp-bridge.md), [`docs/compaction.md`](docs/compaction.md).
|
|
8
8
|
|
|
9
9
|
## Install
|
|
10
10
|
|
|
@@ -22,7 +22,7 @@ const { text } = await agent.ask('What time is it in Tokyo?')
|
|
|
22
22
|
console.log(text)
|
|
23
23
|
```
|
|
24
24
|
|
|
25
|
-
Set `ANTHROPIC_API_KEY` in your environment before running.
|
|
25
|
+
Set `ANTHROPIC_API_KEY` in your environment before running. Default model is `claude-sonnet-4-6`; default tool surface includes `current_time`.
|
|
26
26
|
|
|
27
27
|
## CLI
|
|
28
28
|
|
|
@@ -33,15 +33,26 @@ npx marco-agent --stream "write a haiku about TypeScript"
|
|
|
33
33
|
|
|
34
34
|
## Streaming
|
|
35
35
|
|
|
36
|
-
For chat UIs, server-sent events, etc. —
|
|
36
|
+
For chat UIs, server-sent events, etc. — `agent.stream()` returns an `AsyncGenerator<StreamEvent>` with these event types:
|
|
37
|
+
|
|
38
|
+
| Event | When |
|
|
39
|
+
|---|---|
|
|
40
|
+
| `text` | text token from the model |
|
|
41
|
+
| `reasoning` | chain-of-thought token (DeepSeek R1/V4-Pro, OpenAI o-series, etc.) |
|
|
42
|
+
| `tool_call_start` | model invokes a tool |
|
|
43
|
+
| `tool_call_end` | tool finishes |
|
|
44
|
+
| `usage` | running token + cost total after each model call |
|
|
45
|
+
| `budget_exceeded` | a budget guard tripped |
|
|
46
|
+
| `compaction_start` / `compaction_end` | history compaction summary call begins / finishes |
|
|
47
|
+
| `done` | turn complete; `event.result` carries text, messages, usage |
|
|
37
48
|
|
|
38
49
|
```typescript
|
|
39
50
|
for await (const event of agent.stream('explain monads')) {
|
|
40
51
|
if (event.type === 'text') process.stdout.write(event.text)
|
|
52
|
+
else if (event.type === 'reasoning') process.stderr.write(event.text) // route to a separate panel
|
|
41
53
|
else if (event.type === 'tool_call_start') console.log(`\n[calling ${event.name}]`)
|
|
42
|
-
else if (event.type === '
|
|
43
|
-
|
|
44
|
-
}
|
|
54
|
+
else if (event.type === 'usage') updateLiveSpendCounter(event.usage.costUsd)
|
|
55
|
+
else if (event.type === 'done') persist(event.result.messages)
|
|
45
56
|
}
|
|
46
57
|
```
|
|
47
58
|
|
|
@@ -50,6 +61,8 @@ for await (const event of agent.stream('explain monads')) {
|
|
|
50
61
|
`ask()` and `stream()` both accept a `history` parameter — pass the previous turn's `result.messages` to continue:
|
|
51
62
|
|
|
52
63
|
```typescript
|
|
64
|
+
import type { Message } from 'marco-agent'
|
|
65
|
+
|
|
53
66
|
let history: Message[] = []
|
|
54
67
|
|
|
55
68
|
const r1 = await agent.ask('My name is Karan.', history)
|
|
@@ -59,35 +72,53 @@ const r2 = await agent.ask('What did I just tell you?', history)
|
|
|
59
72
|
console.log(r2.text) // → "You told me your name is Karan."
|
|
60
73
|
```
|
|
61
74
|
|
|
62
|
-
State lives with the caller, not the agent — so
|
|
75
|
+
State lives with the caller, not the agent — so a single `MarcoAgent` instance is safe to share across concurrent web requests.
|
|
63
76
|
|
|
64
|
-
##
|
|
77
|
+
## Choosing a provider
|
|
65
78
|
|
|
66
|
-
|
|
79
|
+
Two providers cover everything. `AnthropicProvider` for Claude. `OpenAICompatibleProvider` for the rest — swap one URL, talk to anything that speaks `/v1/chat/completions`:
|
|
67
80
|
|
|
68
|
-
|
|
81
|
+
| Backend | Provider | `baseURL` |
|
|
82
|
+
|---|---|---|
|
|
83
|
+
| Claude (Anthropic) | `AnthropicProvider` | — |
|
|
84
|
+
| OpenAI direct | `OpenAICompatibleProvider` | `https://api.openai.com/v1` (default) |
|
|
85
|
+
| OpenRouter | `OpenAICompatibleProvider` | `https://openrouter.ai/api/v1` |
|
|
86
|
+
| DeepSeek direct | `OpenAICompatibleProvider` | `https://api.deepseek.com/v1` |
|
|
87
|
+
| Ollama (local Llama, Qwen, etc.) | `OpenAICompatibleProvider` | `http://localhost:11434/v1` |
|
|
88
|
+
| LM Studio (local) | `OpenAICompatibleProvider` | `http://localhost:1234/v1` |
|
|
89
|
+
| Groq, Together, Fireworks, vLLM, … | `OpenAICompatibleProvider` | their `/v1` |
|
|
69
90
|
|
|
70
|
-
|
|
91
|
+
Quick example — OpenRouter:
|
|
71
92
|
|
|
72
93
|
```typescript
|
|
73
|
-
import { MarcoAgent } from 'marco-agent'
|
|
74
|
-
import { z } from 'zod'
|
|
75
|
-
import type { Tool } from 'marco-harness'
|
|
94
|
+
import { MarcoAgent, OpenAICompatibleProvider } from 'marco-agent'
|
|
76
95
|
|
|
77
|
-
const
|
|
96
|
+
const agent = new MarcoAgent({
|
|
97
|
+
provider: new OpenAICompatibleProvider({
|
|
98
|
+
apiKey: process.env.OPENROUTER_API_KEY,
|
|
99
|
+
baseURL: 'https://openrouter.ai/api/v1',
|
|
100
|
+
headers: { 'HTTP-Referer': 'https://yourapp.com', 'X-Title': 'Your App' },
|
|
101
|
+
}),
|
|
102
|
+
model: 'deepseek/deepseek-v4-flash',
|
|
103
|
+
})
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
Full recipes for each backend (including local Ollama, direct OpenAI, vLLM, etc.) are in [`docs/providers.md`](docs/providers.md).
|
|
107
|
+
|
|
108
|
+
## Tools — define once with zod
|
|
109
|
+
|
|
110
|
+
`toolFromZod()` derives the JSON Schema and runtime validation from a single zod schema, instead of authoring both by hand:
|
|
111
|
+
|
|
112
|
+
```typescript
|
|
113
|
+
import { MarcoAgent, toolFromZod, z } from 'marco-agent'
|
|
114
|
+
|
|
115
|
+
const calculatorTool = toolFromZod({
|
|
78
116
|
name: 'calculator',
|
|
79
117
|
description: 'Evaluate a basic arithmetic expression.',
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
},
|
|
85
|
-
validate: (i) => z.object({ expression: z.string() }).parse(i),
|
|
86
|
-
handler: async (input) => {
|
|
87
|
-
const { expression } = input as { expression: string }
|
|
88
|
-
return String(Function(`"use strict"; return (${expression})`)())
|
|
89
|
-
},
|
|
90
|
-
}
|
|
118
|
+
schema: z.object({ expression: z.string() }),
|
|
119
|
+
handler: async ({ expression }) =>
|
|
120
|
+
String(Function(`"use strict"; return (${expression})`)()),
|
|
121
|
+
})
|
|
91
122
|
|
|
92
123
|
const agent = new MarcoAgent({
|
|
93
124
|
systemPrompt: 'You are a math tutor. Show your work.',
|
|
@@ -95,6 +126,88 @@ const agent = new MarcoAgent({
|
|
|
95
126
|
})
|
|
96
127
|
```
|
|
97
128
|
|
|
129
|
+
`z` is re-exported from marco-agent so you don't have to manage zod's v4 import path. (Internally uses `zod/v4` for native JSON Schema export.)
|
|
130
|
+
|
|
131
|
+
## MCP server tools — `fromMcpServer()`
|
|
132
|
+
|
|
133
|
+
Connect to any MCP server and turn its tool surface into agent tools with one call:
|
|
134
|
+
|
|
135
|
+
```typescript
|
|
136
|
+
import { MarcoAgent, fromMcpServer, currentTimeTool } from 'marco-agent'
|
|
137
|
+
|
|
138
|
+
const tools = await fromMcpServer({
|
|
139
|
+
url: 'https://your-app.com/api/mcp',
|
|
140
|
+
headers: { 'authorization': `Bearer ${serverSecret}` },
|
|
141
|
+
// Spread into every tool call's arguments AFTER the model's args, so the
|
|
142
|
+
// model can't override — multi-tenant security boundary.
|
|
143
|
+
contextArgs: { target_user_id: requestingUserId },
|
|
144
|
+
// Optional filter for which tools to expose.
|
|
145
|
+
include: ['search_records', 'list_records'],
|
|
146
|
+
})
|
|
147
|
+
|
|
148
|
+
const agent = new MarcoAgent({ tools: [currentTimeTool, ...tools] })
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
Full API: [`docs/mcp-bridge.md`](docs/mcp-bridge.md).
|
|
152
|
+
|
|
153
|
+
## Usage tracking & budgets
|
|
154
|
+
|
|
155
|
+
Tokens are the source of truth; cost is a derived view via an injectable pricing function. Defaults ship for current Anthropic models; override for accuracy or non-Anthropic models:
|
|
156
|
+
|
|
157
|
+
```typescript
|
|
158
|
+
const agent = new MarcoAgent({
|
|
159
|
+
budget: {
|
|
160
|
+
maxInputTokensPerTurn: 100_000,
|
|
161
|
+
maxModelCallsPerTurn: 20,
|
|
162
|
+
maxCostUsdPerTurn: 0.50,
|
|
163
|
+
onExceeded: 'abort', // throws BudgetExceededError on ask(), emits budget_exceeded on stream()
|
|
164
|
+
},
|
|
165
|
+
// Optional — pass your own pricing for OpenRouter/DeepSeek/negotiated rates.
|
|
166
|
+
pricing: (model, usage) => /* your $-per-token table */ 0,
|
|
167
|
+
})
|
|
168
|
+
|
|
169
|
+
const { text, usage } = await agent.ask('hello')
|
|
170
|
+
console.log(usage)
|
|
171
|
+
// { inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, modelCalls, costUsd }
|
|
172
|
+
```
|
|
173
|
+
|
|
174
|
+
Full API: [`docs/usage-tracking.md`](docs/usage-tracking.md).
|
|
175
|
+
|
|
176
|
+
## Compaction (opt-in)
|
|
177
|
+
|
|
178
|
+
When conversations get long, summarize older history into a synthetic system message and keep the last N turns verbatim:
|
|
179
|
+
|
|
180
|
+
```typescript
|
|
181
|
+
const agent = new MarcoAgent({
|
|
182
|
+
compaction: {
|
|
183
|
+
summaryModel: 'claude-haiku-4-5', // REQUIRED — pick a cheap model
|
|
184
|
+
summaryPrompt: 'Summarize the conversation, preserving facts learned and unresolved threads.', // REQUIRED
|
|
185
|
+
triggerAtInputTokens: 150_000, // optional, default 150_000
|
|
186
|
+
keepLastTurns: 4, // optional, default 4
|
|
187
|
+
},
|
|
188
|
+
})
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
Without `compaction` config, no compaction happens. `summaryModel` and `summaryPrompt` are required — the library can't know what models your API key supports or what shape of summary your domain needs.
|
|
192
|
+
|
|
193
|
+
Full API: [`docs/compaction.md`](docs/compaction.md).
|
|
194
|
+
|
|
195
|
+
## Reasoning models
|
|
196
|
+
|
|
197
|
+
Models that emit chain-of-thought (DeepSeek R1/V4-Pro, OpenAI o-series via OpenRouter) surface their reasoning separately from the final text:
|
|
198
|
+
|
|
199
|
+
```typescript
|
|
200
|
+
const { text, reasoning } = await agent.ask('Hard logic problem.')
|
|
201
|
+
console.log(text) // the visible answer
|
|
202
|
+
console.log(reasoning) // the model's hidden thinking, if any
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
Heads up: reasoning models burn output tokens on hidden CoT. Bump `maxTokens` to ~4-8k for them so the visible answer isn't starved.
|
|
206
|
+
|
|
207
|
+
## Web app integration (Next.js)
|
|
208
|
+
|
|
209
|
+
A complete streaming chat panel example lives in [`examples/nextjs/`](examples/nextjs/) — server route handler + client React component, plus notes on adapting it for project-specific tools.
|
|
210
|
+
|
|
98
211
|
## License
|
|
99
212
|
|
|
100
213
|
MIT
|
package/dist/bin.js
CHANGED
|
File without changes
|
package/dist/mcp.d.ts
CHANGED
|
@@ -5,6 +5,7 @@ export type FromMcpServerOptions = {
|
|
|
5
5
|
contextArgs?: Record<string, unknown>;
|
|
6
6
|
include?: string[];
|
|
7
7
|
exclude?: string[];
|
|
8
|
+
listParams?: Record<string, unknown>;
|
|
8
9
|
fetch?: typeof globalThis.fetch;
|
|
9
10
|
};
|
|
10
11
|
export declare function fromMcpServer(opts: FromMcpServerOptions): Promise<Tool[]>;
|
package/dist/mcp.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"mcp.d.ts","sourceRoot":"","sources":["../src/mcp.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,eAAe,CAAA;AAEzC,MAAM,MAAM,oBAAoB,GAAG;IACjC,GAAG,EAAE,MAAM,CAAA;IACX,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAA;IAChC,WAAW,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;IACrC,OAAO,CAAC,EAAE,MAAM,EAAE,CAAA;IAClB,OAAO,CAAC,EAAE,MAAM,EAAE,CAAA;
|
|
1
|
+
{"version":3,"file":"mcp.d.ts","sourceRoot":"","sources":["../src/mcp.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,eAAe,CAAA;AAEzC,MAAM,MAAM,oBAAoB,GAAG;IACjC,GAAG,EAAE,MAAM,CAAA;IACX,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAA;IAChC,WAAW,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;IACrC,OAAO,CAAC,EAAE,MAAM,EAAE,CAAA;IAClB,OAAO,CAAC,EAAE,MAAM,EAAE,CAAA;IAKlB,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;IAEpC,KAAK,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAA;CAChC,CAAA;AAoDD,wBAAsB,aAAa,CAAC,IAAI,EAAE,oBAAoB,GAAG,OAAO,CAAC,IAAI,EAAE,CAAC,CA2B/E"}
|
package/dist/mcp.js
CHANGED
|
@@ -42,7 +42,7 @@ export async function fromMcpServer(opts) {
|
|
|
42
42
|
const fetchImpl = opts.fetch ?? globalThis.fetch;
|
|
43
43
|
if (!fetchImpl)
|
|
44
44
|
throw new Error('fromMcpServer: no fetch implementation available');
|
|
45
|
-
const list = await jsonRpc(opts.url, 'tools/list', {}, opts.headers, fetchImpl);
|
|
45
|
+
const list = await jsonRpc(opts.url, 'tools/list', opts.listParams ?? {}, opts.headers, fetchImpl);
|
|
46
46
|
let tools = list.tools ?? [];
|
|
47
47
|
if (opts.include)
|
|
48
48
|
tools = tools.filter((t) => opts.include.includes(t.name));
|
package/dist/mcp.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"mcp.js","sourceRoot":"","sources":["../src/mcp.ts"],"names":[],"mappings":"AAAA,6CAA6C;AAC7C,EAAE;AACF,+EAA+E;AAC/E,0EAA0E;AAC1E,4EAA4E;AAC5E,2CAA2C;AAC3C,EAAE;AACF,oEAAoE;AACpE,EAAE;AACF,sEAAsE;AACtE,4EAA4E;AAC5E,sEAAsE;AACtE,sCAAsC;
|
|
1
|
+
{"version":3,"file":"mcp.js","sourceRoot":"","sources":["../src/mcp.ts"],"names":[],"mappings":"AAAA,6CAA6C;AAC7C,EAAE;AACF,+EAA+E;AAC/E,0EAA0E;AAC1E,4EAA4E;AAC5E,2CAA2C;AAC3C,EAAE;AACF,oEAAoE;AACpE,EAAE;AACF,sEAAsE;AACtE,4EAA4E;AAC5E,sEAAsE;AACtE,sCAAsC;AAsCtC,IAAI,MAAM,GAAG,CAAC,CAAA;AAEd,KAAK,UAAU,OAAO,CACpB,GAAW,EACX,MAAc,EACd,MAAe,EACf,OAA2C,EAC3C,SAAkC;IAElC,MAAM,GAAG,GAAG,MAAM,SAAS,CAAC,GAAG,EAAE;QAC/B,MAAM,EAAE,MAAM;QACd,OAAO,EAAE,EAAE,cAAc,EAAE,kBAAkB,EAAE,GAAG,CAAC,OAAO,IAAI,EAAE,CAAC,EAAE;QACnE,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,EAAE,OAAO,EAAE,KAAK,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,MAAM,EAAE,MAAM,EAAE,CAAC;KACvE,CAAC,CAAA;IACF,IAAI,CAAC,GAAG,CAAC,EAAE;QAAE,MAAM,IAAI,KAAK,CAAC,YAAY,GAAG,CAAC,MAAM,KAAK,MAAM,GAAG,CAAC,IAAI,EAAE,EAAE,CAAC,CAAA;IAC3E,MAAM,IAAI,GAAG,CAAC,MAAM,GAAG,CAAC,IAAI,EAAE,CAAuB,CAAA;IACrD,IAAI,IAAI,CAAC,KAAK;QAAE,MAAM,IAAI,KAAK,CAAC,kBAAkB,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE,CAAC,CAAA;IACvE,IAAI,IAAI,CAAC,MAAM,KAAK,SAAS;QAAE,MAAM,IAAI,KAAK,CAAC,uBAAuB,CAAC,CAAA;IACvE,OAAO,IAAI,CAAC,MAAM,CAAA;AACpB,CAAC;AAED,SAAS,YAAY,CAAC,CAAgB;IACpC,IAAI,CAAC,CAAC,OAAO,EAAE,CAAC;QACd,MAAM,GAAG,GAAG,CAAC,CAAC,OAAO,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC,EAAE,IAAI,IAAI,yBAAyB,CAAA;QACxF,MAAM,IAAI,KAAK,CAAC,GAAG,CAAC,CAAA;IACtB,CAAC;IACD,IAAI,CAAC,CAAC,iBAAiB,KAAK,SAAS;QAAE,OAAO,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,iBAAiB,CAAC,CAAA;IACjF,IAAI,CAAC,CAAC,OAAO;QAAE,OAAO,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,IAAI,EAAE,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,EAAE,CAAA;IAC1E,OAAO,EAAE,CAAA;AACX,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,aAAa,CAAC,IAA0B;IAC5D,MAAM,SAAS,GAAG,IAAI,CAAC,KAAK,IAAI,UAAU,CAAC,KAAK,CAAA;IAChD,IAAI,CAAC,SAAS;QAAE,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAA;IAEnF,MAAM,IAAI,GAAG,MAAM,OAAO,CACxB,IAAI,CAAC,GAAG,EAAE,YAAY,EAAE,IAAI,CAAC,UAAU,IAAI,EAAE,EAAE,IAAI,CAAC,OAAO,EAAE,SAAS,CACvE,CAAA;IAED,IAAI,KAAK,GAAG,IAAI,CAAC,KAAK,IAAI,EAAE,CAAA;IAC5B,IAAI,IAAI,CAAC,OAAO;QAAE,KAAK,GAAG,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CA
AC,IAAI,CAAC,OAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAA;IAC7E,IAAI,IAAI,CAAC,OAAO;QAAE,KAAK,GAAG,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,IAAI,CAAC,OAAQ,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAA;IAE9E,OAAO,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,EAAQ,EAAE,CAAC,CAAC;QAC7B,IAAI,EAAE,CAAC,CAAC,IAAI;QACZ,WAAW,EAAE,CAAC,CAAC,WAAW,IAAI,EAAE;QAChC,eAAe,EAAE,CAAC,CAAC,WAAW,IAAI,EAAE,IAAI,EAAE,QAAQ,EAAE,UAAU,EAAE,EAAE,EAAE;QACpE,4EAA4E;QAC5E,QAAQ,EAAE,CAAC,KAAK,EAAE,EAAE,CAAC,KAAK;QAC1B,OAAO,EAAE,KAAK,EAAE,KAAK,EAAE,EAAE;YACvB,MAAM,SAAS,GAAG,CAAC,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,KAAK,IAAI,CAAC,CAAC,CAAC,CAAC,KAAgC,CAAC,CAAC,CAAC,EAAE,CAAA;YACvG,MAAM,IAAI,GAAG,EAAE,GAAG,SAAS,EAAE,GAAG,CAAC,IAAI,CAAC,WAAW,IAAI,EAAE,CAAC,EAAE,CAAA;YAC1D,MAAM,MAAM,GAAG,MAAM,OAAO,CAC1B,IAAI,CAAC,GAAG,EAAE,YAAY,EAAE,EAAE,IAAI,EAAE,CAAC,CAAC,IAAI,EAAE,SAAS,EAAE,IAAI,EAAE,EAAE,IAAI,CAAC,OAAO,EAAE,SAAS,CACnF,CAAA;YACD,OAAO,YAAY,CAAC,MAAM,CAAC,CAAA;QAC7B,CAAC;KACF,CAAC,CAAC,CAAA;AACL,CAAC"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "marco-agent",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.1",
|
|
4
4
|
"description": "A simple, extensible AI agent built on marco-harness.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -49,5 +49,13 @@
|
|
|
49
49
|
"marco-harness"
|
|
50
50
|
],
|
|
51
51
|
"author": "Karan Kochar <karankochar13@gmail.com> (https://karankochar.dev)",
|
|
52
|
-
"license": "MIT"
|
|
52
|
+
"license": "MIT",
|
|
53
|
+
"repository": {
|
|
54
|
+
"type": "git",
|
|
55
|
+
"url": "git+https://github.com/pyrotank41/marco-agent.git"
|
|
56
|
+
},
|
|
57
|
+
"bugs": {
|
|
58
|
+
"url": "https://github.com/pyrotank41/marco-agent/issues"
|
|
59
|
+
},
|
|
60
|
+
"homepage": "https://github.com/pyrotank41/marco-agent#readme"
|
|
53
61
|
}
|