@scalar/agent 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +166 -0
- package/dist/index.d.ts +158 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +120965 -0
- package/dist/index.js.map +7 -0
- package/package.json +22 -0
package/README.md
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
# Scalar Agent SDK
|
|
2
|
+
|
|
3
|
+
Connect your AI agent to Scalar's OpenAPI MCP servers. Provides native integrations for the Vercel AI SDK, OpenAI Agents SDK, and Anthropic Claude SDK.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm i @scalar/agent
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
pnpm i @scalar/agent
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
bun i @scalar/agent
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Setup
|
|
20
|
+
|
|
21
|
+
```ts
|
|
22
|
+
import { agentScalar } from '@scalar/agent'
|
|
23
|
+
|
|
24
|
+
const scalar = agentScalar({
|
|
25
|
+
agentKey: 'your-agent-key',
|
|
26
|
+
baseUrl: 'https://services.scalar.com',
|
|
27
|
+
})
|
|
28
|
+
|
|
29
|
+
const session = await scalar.session({
|
|
30
|
+
apis: [{ namespace: 'scalar', slug: 'galaxy' }],
|
|
31
|
+
})
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Providers
|
|
35
|
+
|
|
36
|
+
### Vercel AI SDK
|
|
37
|
+
|
|
38
|
+
Uses `@ai-sdk/mcp` natively. Returns a tool set ready for `generateText` / `streamText`.
|
|
39
|
+
|
|
40
|
+
```ts
|
|
41
|
+
import { generateText } from 'ai'
|
|
42
|
+
import { openai } from '@ai-sdk/openai'
|
|
43
|
+
|
|
44
|
+
const scalar = agentScalar({
|
|
45
|
+
agentKey: 'your-agent-key',
|
|
46
|
+
baseUrl: 'https://services.scalar.com',
|
|
47
|
+
})
|
|
48
|
+
|
|
49
|
+
const session = await scalar.session({
|
|
50
|
+
apis: [{ namespace: 'scalar', slug: 'galaxy' }],
|
|
51
|
+
})
|
|
52
|
+
|
|
53
|
+
const tools = await session.createVercelAITools()
|
|
54
|
+
|
|
55
|
+
const { text } = await generateText({
|
|
56
|
+
model: openai('gpt-4o'),
|
|
57
|
+
tools,
|
|
58
|
+
prompt: 'List the available endpoints in the Galaxy API',
|
|
59
|
+
})
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### OpenAI Agents SDK
|
|
63
|
+
|
|
64
|
+
Returns a `Promise<MCPServerOptions[]>` — pass each entry to `new MCPServerStreamableHttp()` from `@openai/agents`. The agent runtime handles tool discovery and execution natively.
|
|
65
|
+
|
|
66
|
+
```ts
|
|
67
|
+
import { Agent, MCPServerStreamableHttp, run } from '@openai/agents'
|
|
68
|
+
|
|
69
|
+
const scalar = agentScalar({
|
|
70
|
+
agentKey: 'your-agent-key',
|
|
71
|
+
baseUrl: 'https://services.scalar.com',
|
|
72
|
+
})
|
|
73
|
+
|
|
74
|
+
const session = await scalar.session({
|
|
75
|
+
apis: [{ namespace: 'scalar', slug: 'galaxy' }],
|
|
76
|
+
})
|
|
77
|
+
|
|
78
|
+
const serverOptions = await session.createOpenAIMCP()
|
|
79
|
+
const servers = serverOptions.map((opts) => new MCPServerStreamableHttp(opts))
|
|
80
|
+
await Promise.all(servers.map((s) => s.connect()))
|
|
81
|
+
|
|
82
|
+
const agent = new Agent({
|
|
83
|
+
name: 'api-agent',
|
|
84
|
+
instructions: 'You help users interact with APIs.',
|
|
85
|
+
mcpServers: servers,
|
|
86
|
+
})
|
|
87
|
+
|
|
88
|
+
const result = await run(
|
|
89
|
+
agent,
|
|
90
|
+
'List the available endpoints in the Galaxy API',
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
await Promise.all(servers.map((s) => s.close()))
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
### Anthropic Claude SDK
|
|
97
|
+
|
|
98
|
+
Uses `@modelcontextprotocol/sdk` directly. Returns `tools` (ready for `messages.create`) and `executeTool` (for handling `tool_use` response blocks).
|
|
99
|
+
|
|
100
|
+
```ts
|
|
101
|
+
import Anthropic from '@anthropic-ai/sdk'
|
|
102
|
+
|
|
103
|
+
const scalar = agentScalar({
|
|
104
|
+
agentKey: 'your-agent-key',
|
|
105
|
+
baseUrl: 'https://services.scalar.com',
|
|
106
|
+
})
|
|
107
|
+
|
|
108
|
+
const session = await scalar.session({
|
|
109
|
+
apis: [{ namespace: 'scalar', slug: 'galaxy' }],
|
|
110
|
+
})
|
|
111
|
+
|
|
112
|
+
const client = new Anthropic()
|
|
113
|
+
const { tools, executeTool } = await session.createAnthropicTools()
|
|
114
|
+
|
|
115
|
+
const messages: Anthropic.MessageParam[] = [
|
|
116
|
+
{ role: 'user', content: 'List the available endpoints in the Galaxy API' },
|
|
117
|
+
]
|
|
118
|
+
|
|
119
|
+
// Agentic loop
|
|
120
|
+
while (true) {
|
|
121
|
+
const response = await client.messages.create({
|
|
122
|
+
model: 'claude-opus-4-6',
|
|
123
|
+
max_tokens: 4096,
|
|
124
|
+
tools,
|
|
125
|
+
messages,
|
|
126
|
+
})
|
|
127
|
+
|
|
128
|
+
messages.push({ role: 'assistant', content: response.content })
|
|
129
|
+
|
|
130
|
+
if (response.stop_reason !== 'tool_use') break
|
|
131
|
+
|
|
132
|
+
const toolResults: Anthropic.ToolResultBlockParam[] = await Promise.all(
|
|
133
|
+
response.content
|
|
134
|
+
.filter(
|
|
135
|
+
(block): block is Anthropic.ToolUseBlock => block.type === 'tool_use',
|
|
136
|
+
)
|
|
137
|
+
.map(async (block) => ({
|
|
138
|
+
type: 'tool_result',
|
|
139
|
+
tool_use_id: block.id,
|
|
140
|
+
content: await executeTool(
|
|
141
|
+
block.name,
|
|
142
|
+
block.input as Record<string, unknown>,
|
|
143
|
+
),
|
|
144
|
+
})),
|
|
145
|
+
)
|
|
146
|
+
|
|
147
|
+
messages.push({ role: 'user', content: toolResults })
|
|
148
|
+
}
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
## Configuration
|
|
152
|
+
|
|
153
|
+
| Option | Type | Description |
|
|
154
|
+
| ---------- | -------- | ---------------------------------------------------------------------- |
|
|
155
|
+
| `agentKey` | `string` | Scalar agent key, sent as the `x-scalar-agent-key` request header |
|
|
156
|
+
| `baseUrl` | `string` | Base URL of the Scalar MCP server. Defaults to the staging environment |
|
|
157
|
+
|
|
158
|
+
## How it works
|
|
159
|
+
|
|
160
|
+
Each Scalar API is exposed as an MCP (Model Context Protocol) server. The SDK connects to these servers and surfaces their tools to your AI framework of choice. Every API exposes three tools:
|
|
161
|
+
|
|
162
|
+
- **`get-openapi-specs-summary`** — lists the available endpoints
|
|
163
|
+
- **`get-mini-openapi-spec`** — fetches a trimmed OpenAPI spec for specific endpoints
|
|
164
|
+
- **`execute-request`** — executes an HTTP request against the API
|
|
165
|
+
|
|
166
|
+
Tools are namespaced by API to avoid collisions when connecting multiple APIs at once: `{namespace}{delimiter}{slug}__{toolName}`.
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
import type Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import z from 'zod';
|
|
3
|
+
export interface AgentSDKConfig {
|
|
4
|
+
/** Base URL of the Scalar MCP server. Defaults to the Scalar staging environment. */
|
|
5
|
+
baseUrl?: string;
|
|
6
|
+
/** Sent as the `x-scalar-agent-key` request header. */
|
|
7
|
+
agentKey?: string;
|
|
8
|
+
}
|
|
9
|
+
type Api = {
|
|
10
|
+
namespace: string;
|
|
11
|
+
slug: string;
|
|
12
|
+
};
|
|
13
|
+
export type MCPServerOptions = {
|
|
14
|
+
url: string;
|
|
15
|
+
name: string;
|
|
16
|
+
fetch: (url: RequestInfo | URL, options?: RequestInit) => Promise<Response>;
|
|
17
|
+
};
|
|
18
|
+
export declare const EXECUTE_REQUEST_TOOL_NAME = "execute-request";
|
|
19
|
+
export declare const executeRequestToolInputSchema: z.ZodObject<{
|
|
20
|
+
method: z.ZodString;
|
|
21
|
+
path: z.ZodString;
|
|
22
|
+
headers: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
|
|
23
|
+
body: z.ZodOptional<z.ZodString>;
|
|
24
|
+
}, z.core.$strip>;
|
|
25
|
+
export declare const mcpToolSchemas: {
|
|
26
|
+
"execute-request": {
|
|
27
|
+
inputSchema: z.ZodObject<{
|
|
28
|
+
method: z.ZodString;
|
|
29
|
+
path: z.ZodString;
|
|
30
|
+
headers: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
|
|
31
|
+
body: z.ZodOptional<z.ZodString>;
|
|
32
|
+
}, z.core.$strip>;
|
|
33
|
+
};
|
|
34
|
+
"get-mini-openapi-spec": {
|
|
35
|
+
inputSchema: z.ZodObject<{
|
|
36
|
+
question: z.ZodString;
|
|
37
|
+
}, z.core.$strip>;
|
|
38
|
+
};
|
|
39
|
+
"get-openapi-specs-summary": {
|
|
40
|
+
inputSchema: z.ZodObject<{}, z.core.$strip>;
|
|
41
|
+
};
|
|
42
|
+
};
|
|
43
|
+
/**
|
|
44
|
+
* Creates a Scalar agent client configured to connect to Scalar's MCP servers.
|
|
45
|
+
*
|
|
46
|
+
* @example
|
|
47
|
+
* ```ts
|
|
48
|
+
* const scalar = agentScalar({ agentKey: 'your-agent-key' })
|
|
49
|
+
* ```
|
|
50
|
+
*/
|
|
51
|
+
export declare const agentScalar: (config?: AgentSDKConfig) => {
|
|
52
|
+
session: ({ apis }: {
|
|
53
|
+
apis: Api[];
|
|
54
|
+
}) => Promise<{
|
|
55
|
+
/**
|
|
56
|
+
* Returns tools in Vercel AI SDK format.
|
|
57
|
+
* Uses `@ai-sdk/mcp` natively.
|
|
58
|
+
*
|
|
59
|
+
* @example
|
|
60
|
+
* ```ts
|
|
61
|
+
* import { agentScalar } from '@scalar/agent'
|
|
62
|
+
* import { generateText } from 'ai'
|
|
63
|
+
* import { openai } from '@ai-sdk/openai'
|
|
64
|
+
*
|
|
65
|
+
* const scalar = agentScalar({ agentKey: 'your-agent-key' })
|
|
66
|
+
* const session = await scalar.session({ apis: [{ namespace: 'scalar', slug: 'galaxy' }] })
|
|
67
|
+
*
|
|
68
|
+
* const tools = await session.createVercelAITools()
|
|
69
|
+
*
|
|
70
|
+
* const { text } = await generateText({
|
|
71
|
+
* model: openai('gpt-4o'),
|
|
72
|
+
* tools,
|
|
73
|
+
* prompt: 'List the available endpoints in the Petstore API',
|
|
74
|
+
* })
|
|
75
|
+
* ```
|
|
76
|
+
*/
|
|
77
|
+
createVercelAITools: () => Promise<any>;
|
|
78
|
+
/**
|
|
79
|
+
* Returns options for creating `MCPServerStreamableHttp` instances via `@openai/agents`.
|
|
80
|
+
* The agent handles tool discovery and execution natively.
|
|
81
|
+
*
|
|
82
|
+
* @example
|
|
83
|
+
* ```ts
|
|
84
|
+
* import { agentScalar } from '@scalar/agent'
|
|
85
|
+
* import { Agent, MCPServerStreamableHttp, run } from '@openai/agents'
|
|
86
|
+
*
|
|
87
|
+
* const scalar = agentScalar({ agentKey: 'your-agent-key' })
|
|
88
|
+
* const session = await scalar.session({ apis: [{ namespace: 'scalar', slug: 'galaxy' }] })
|
|
89
|
+
*
|
|
90
|
+
* const servers = (await session.createOpenAIMCP()).map((opts) => new MCPServerStreamableHttp(opts))
|
|
91
|
+
* await Promise.all(servers.map((s) => s.connect()))
|
|
92
|
+
*
|
|
93
|
+
* const agent = new Agent({
|
|
94
|
+
* name: 'api-agent',
|
|
95
|
+
* instructions: 'You help users interact with APIs.',
|
|
96
|
+
* mcpServers: servers,
|
|
97
|
+
* })
|
|
98
|
+
*
|
|
99
|
+
* const result = await run(agent, 'List the available endpoints in the Petstore API')
|
|
100
|
+
* await Promise.all(servers.map((s) => s.close()))
|
|
101
|
+
* ```
|
|
102
|
+
*/
|
|
103
|
+
createOpenAIMCP: () => Promise<MCPServerOptions[]>;
|
|
104
|
+
/**
|
|
105
|
+
* Returns tools in `@anthropic-ai/sdk` format.
|
|
106
|
+
* Uses `@modelcontextprotocol/sdk` Client directly — `listTools()` already returns
|
|
107
|
+
* JSON Schema so no conversion needed. Tool calls are routed back via `callTool()`.
|
|
108
|
+
*
|
|
109
|
+
* @example
|
|
110
|
+
* ```ts
|
|
111
|
+
* import { agentScalar } from '@scalar/agent'
|
|
112
|
+
* import Anthropic from '@anthropic-ai/sdk'
|
|
113
|
+
*
|
|
114
|
+
* const scalar = agentScalar({ agentKey: 'your-agent-key' })
|
|
115
|
+
* const session = await scalar.session({ apis: [{ namespace: 'scalar', slug: 'galaxy' }] })
|
|
116
|
+
*
|
|
117
|
+
* const client = new Anthropic()
|
|
118
|
+
* const { tools, executeTool } = await session.createAnthropicTools()
|
|
119
|
+
*
|
|
120
|
+
* const messages: Anthropic.MessageParam[] = [
|
|
121
|
+
* { role: 'user', content: 'List the available endpoints in the Petstore API' },
|
|
122
|
+
* ]
|
|
123
|
+
*
|
|
124
|
+
* // Agentic loop
|
|
125
|
+
* while (true) {
|
|
126
|
+
* const response = await client.messages.create({
|
|
127
|
+
* model: 'claude-opus-4-6',
|
|
128
|
+
* max_tokens: 4096,
|
|
129
|
+
* tools,
|
|
130
|
+
* messages,
|
|
131
|
+
* })
|
|
132
|
+
*
|
|
133
|
+
* messages.push({ role: 'assistant', content: response.content })
|
|
134
|
+
*
|
|
135
|
+
* if (response.stop_reason !== 'tool_use') break
|
|
136
|
+
*
|
|
137
|
+
* const toolResults = await Promise.all(
|
|
138
|
+
* response.content
|
|
139
|
+
* .filter((b): b is Anthropic.ToolUseBlock => b.type === 'tool_use')
|
|
140
|
+
* .map(async (b) => ({
|
|
141
|
+
* type: 'tool_result' as const,
|
|
142
|
+
* tool_use_id: b.id,
|
|
143
|
+
* content: await executeTool(b.name, b.input as Record<string, unknown>),
|
|
144
|
+
* })),
|
|
145
|
+
* )
|
|
146
|
+
*
|
|
147
|
+
* messages.push({ role: 'user', content: toolResults })
|
|
148
|
+
* }
|
|
149
|
+
* ```
|
|
150
|
+
*/
|
|
151
|
+
createAnthropicTools: () => Promise<{
|
|
152
|
+
tools: Anthropic.Tool[];
|
|
153
|
+
executeTool: (toolName: string, input: Record<string, unknown>) => Promise<string>;
|
|
154
|
+
}>;
|
|
155
|
+
}>;
|
|
156
|
+
};
|
|
157
|
+
export {};
|
|
158
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,SAAS,MAAM,mBAAmB,CAAA;AAG9C,OAAO,CAAC,MAAM,KAAK,CAAA;AAUnB,MAAM,WAAW,cAAc;IAC7B,qFAAqF;IACrF,OAAO,CAAC,EAAE,MAAM,CAAA;IAChB,uDAAuD;IACvD,QAAQ,CAAC,EAAE,MAAM,CAAA;CAClB;AAED,KAAK,GAAG,GAAG;IAAE,SAAS,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAE,CAAA;AAE9C,MAAM,MAAM,gBAAgB,GAAG;IAC7B,GAAG,EAAE,MAAM,CAAA;IACX,IAAI,EAAE,MAAM,CAAA;IACZ,KAAK,EAAE,CAAC,GAAG,EAAE,WAAW,GAAG,GAAG,EAAE,OAAO,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAA;CAC5E,CAAA;AAED,eAAO,MAAM,yBAAyB,oBAAoB,CAAA;AAE1D,eAAO,MAAM,6BAA6B;;;;;iBAKxC,CAAA;AAEF,eAAO,MAAM,cAAc;;;;;;;;;;;;;;;;;CAM1B,CAAA;AA2DD;;;;;;;GAOG;AACH,eAAO,MAAM,WAAW,GAAI,SAAQ,cAAmB;wBAuBpB;QAAE,IAAI,EAAE,GAAG,EAAE,CAAA;KAAE;QAS5C;;;;;;;;;;;;;;;;;;;;;WAqBG;;QA6BH;;;;;;;;;;;;;;;;;;;;;;;;WAwBG;+BACwB,OAAO,CAAC,gBAAgB,EAAE,CAAC;QAStD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;WA8CG;oCAC6B,OAAO,CAAC;YACtC,KAAK,EAAE,SAAS,CAAC,IAAI,EAAE,CAAA;YACvB,WAAW,EAAE,CACX,QAAQ,EAAE,MAAM,EAChB,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC3B,OAAO,CAAC,MAAM,CAAC,CAAA;SACrB,CAAC;;CAyEP,CAAA"}
|