agentv 0.13.0 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -6
- package/dist/chunk-7XYYGJAC.js +86 -0
- package/dist/chunk-7XYYGJAC.js.map +1 -0
- package/dist/chunk-D44RV4HN.js +258 -0
- package/dist/chunk-D44RV4HN.js.map +1 -0
- package/dist/chunk-LVLBPRCV.js +40830 -0
- package/dist/chunk-LVLBPRCV.js.map +1 -0
- package/dist/cli.js +2 -1
- package/dist/cli.js.map +1 -1
- package/dist/index.js +2 -1
- package/dist/templates/.claude/skills/agentv-eval-builder/SKILL.md +5 -53
- package/dist/templates/.claude/skills/agentv-eval-builder/references/example-evals.md +36 -171
- package/dist/templates/.claude/skills/agentv-prompt-optimizer/SKILL.md +10 -11
- package/dist/templates/.github/prompts/agentv-optimize.prompt.md +1 -1
- package/dist/token-O4PKPL7Y.js +63 -0
- package/dist/token-O4PKPL7Y.js.map +1 -0
- package/dist/token-util-SOXXDYPK.js +6 -0
- package/dist/token-util-SOXXDYPK.js.map +1 -0
- package/package.json +2 -2
- package/dist/chunk-WMO5PVPX.js +0 -18084
- package/dist/chunk-WMO5PVPX.js.map +0 -1
- package/dist/templates/agentv/.env.template +0 -23
package/dist/cli.js
CHANGED
package/dist/cli.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["../src/cli.ts"],"sourcesContent":["#!/usr/bin/env node\r\nimport { runCli } from './index.js';\r\n\r\nvoid runCli();\r\n"],"mappings":"
+{"version":3,"sources":["../src/cli.ts"],"sourcesContent":["#!/usr/bin/env node\r\nimport { runCli } from './index.js';\r\n\r\nvoid runCli();\r\n"],"mappings":";;;;;;;AAGA,KAAK,OAAO;","names":[]}
package/dist/index.js
CHANGED
package/dist/templates/.claude/skills/agentv-eval-builder/SKILL.md
CHANGED

@@ -1,9 +1,9 @@
 ---
-name: eval-builder
+name: agentv-eval-builder
 description: Create and maintain AgentV YAML evaluation files for testing AI agent performance. Use this skill when creating new eval files, adding eval cases, or configuring custom evaluators (code validators or LLM judges) for agent testing workflows.
 ---
 
-# Eval Builder
+# AgentV Eval Builder
 
 ## Schema Reference
 - Schema: `references/eval-schema.json` (JSON Schema for validation and tooling)
@@ -11,7 +11,7 @@ description: Create and maintain AgentV YAML evaluation files for testing AI age
 - Examples: `references/example-evals.md`
 
 ## Structure Requirements
-- Root level: `$schema` (required: "agentv-eval-v2"), `description` (optional), `
+- Root level: `$schema` (required: "agentv-eval-v2"), `description` (optional), `execution` (optional), `evalcases` (required)
 - Eval case fields: `id` (required), `outcome` (required), `input_messages` (required), `expected_messages` (required)
 - Optional fields: `conversation_id`, `note`, `execution`
 - Message fields: `role` (required), `content` (required)
@@ -71,10 +71,10 @@ execution:
 ```yaml
 $schema: agentv-eval-v2
 description: Example showing basic features and conversation threading
-
+execution:
+  target: default
 
 evalcases:
-  # Basic eval case with file references
   - id: code-review-basic
     outcome: Assistant provides helpful code analysis
 
@@ -101,52 +101,4 @@ evalcases:
           - Add type hints: `def add(a: int, b: int) -> int:`
           - Add docstring
           - Consider validation for edge cases
-
-  # Advanced: conversation threading, multiple evaluators
-  - id: python-coding-session
-    conversation_id: python-coding-session
-    outcome: Generates correct code with proper error handling
-
-    execution:
-      target: azure_base
-      evaluators:
-        - name: keyword_check
-          type: code
-          script: uv run check_keywords.py
-          cwd: /evaluators/scripts
-        - name: semantic_judge
-          type: llm_judge
-          prompt: /evaluators/prompts/correctness.md
-          model: gpt-5-chat
-
-    input_messages:
-      - role: system
-        content: You are a code generator.
-      - role: user
-        content:
-          - type: text
-            value: Create a function to find the second largest number in a list.
-          - type: file
-            value: /prompts/python.instructions.md
-
-    expected_messages:
-      - role: assistant
-        content: |-
-          ```python
-          from typing import List, Union
-
-          def find_second_largest(numbers: List[int]) -> Union[int, None]:
-              """Find the second largest number."""
-              if not isinstance(numbers, list):
-                  raise TypeError("Input must be a list")
-              if not numbers:
-                  raise ValueError("List cannot be empty")
-
-              unique = list(set(numbers))
-              if len(unique) < 2:
-                  return None
-
-              unique.sort(reverse=True)
-              return unique[1]
-          ```
 ```
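
For orientation on the Structure Requirements change above: the updated line names four root-level keys. A minimal file built from just the fields the diffed documentation lists might look like the sketch below (the id and message text are illustrative placeholders, not taken from the package):

```yaml
# Minimal agentv-eval-v2 sketch assembled from the fields documented above.
# The id and message text are illustrative placeholders.
$schema: agentv-eval-v2         # required
description: Basic smoke test   # optional
execution:                      # optional
  target: default
evalcases:                      # required
  - id: smoke-test
    outcome: Assistant responds with a greeting
    input_messages:
      - role: user
        content: Say hello.
    expected_messages:
      - role: assistant
        content: Hello!
```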
package/dist/templates/.claude/skills/agentv-eval-builder/references/example-evals.md
CHANGED

@@ -102,200 +102,65 @@ evalcases:
           }
 ```
 
-## Conversation
+## Multi-Turn Conversation (Single Eval Case)
 
 ```yaml
 $schema: agentv-eval-v2
-description: Multi-turn
+description: Multi-turn debugging session with clarifying questions
 target: default
 
 evalcases:
-  - id:
-
-
+  - id: debug-with-clarification
+    outcome: |-
+      Assistant conducts a multi-turn debugging session, asking clarification
+      questions when needed, correctly diagnosing the bug, and proposing a clear
+      fix with rationale.
 
     input_messages:
+      - role: system
+        content: You are an expert debugging assistant who reasons step by step, asks clarifying questions, and explains fixes clearly.
       - role: user
-        content: Create a Python User class with name and email attributes
-
-    expected_messages:
-      - role: assistant
         content: |-
+          I'm getting an off-by-one error in this function, but I can't see why:
+
           ```python
-
-
-
+          def get_items(items):
+              result = []
+              for i in range(len(items) - 1):
+                  result.append(items[i])
+              return result
           ```
-
-
-    conversation_id: python-dev-session
-    outcome: Adds email validation to the existing class
-
-    input_messages:
-      - role: user
-        content: Add email validation to the User class
-
-    expected_messages:
+
+          Sometimes the last element is missing. Can you help debug this?
       - role: assistant
         content: |-
-
-
-
-          class User:
-              def __init__(self, name, email):
-                  self.name = name
-                  self.email = self._validate_email(email)
-
-              def _validate_email(self, email):
-                  pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
-                  if not re.match(pattern, email):
-                      raise ValueError(f"Invalid email: {email}")
-                  return email
-          ```
-
-  - id: add-repr
-    conversation_id: python-dev-session
-    outcome: Adds __repr__ method for better debugging
-
-    input_messages:
+          I can help debug this. Before I propose a fix, could you tell me:
+          - What output you expect for an example input list
+          - What output you actually get
       - role: user
-        content:
+        content: |-
+          For `[1, 2, 3, 4]` I expect `[1, 2, 3, 4]`, but I get `[1, 2, 3]`.
 
     expected_messages:
       - role: assistant
         content: |-
+          You have an off-by-one error in your loop bounds.
+          You're iterating with `range(len(items) - 1)`, which stops before the last index.
+          To include all items, you can either:
+          - Use `range(len(items))`, or
+          - Iterate directly over the list: `for item in items:`
+
+          Here's a corrected version:
+
           ```python
-          def
-
+          def get_items(items):
+              result = []
+              for item in items:
+                  result.append(item)
+              return result
           ```
 ```
 
-## Advanced: Per-Case Target Override
-
-```yaml
-$schema: agentv-eval-v2
-description: Cross-platform testing with different targets
-target: azure_base
-
-evalcases:
-  - id: azure-test
-    outcome: Tests with Azure OpenAI (uses file-level default target)
-
-    input_messages:
-      - role: user
-        content: Explain async/await in Python
-
-    expected_messages:
-      - role: assistant
-        content: "async/await is Python's syntax for asynchronous programming..."
-
-  - id: vscode-test
-    outcome: Tests with VS Code Copilot
-
-    execution:
-      target: vscode_projectx
-
-    input_messages:
-      - role: user
-        content:
-          - type: text
-            value: Refactor this code to use async/await
-          - type: file
-            value: /code-samples/sync_code.py
-
-    expected_messages:
-      - role: assistant
-        content: "async def fetch_data():\n await ..."
-```
-
-## Complex Real-World Example: CargoWise Triage
-
-```yaml
-$schema: agentv-eval-v2
-description: CargoWise criticality rating (CR1-CR9) classification eval
-target: default
-
-evalcases:
-  - id: cr-global-outage
-    conversation_id: cargowise-triage
-
-    outcome: |
-      Assistant correctly classifies as 'CR1' for complete system inaccessibility.
-      Reasoning should emphasize 'any user on any workstation' and lack of access.
-
-    execution:
-      evaluators:
-        - name: json_format_validator
-          type: code
-          script: uv run validate_output.py
-        - name: content_evaluator
-          type: llm_judge
-
-    input_messages:
-      - role: user
-        content:
-          - type: file
-            value: ../../prompts/cw-criticality-rating.prompt.md
-          - type: text
-            value: |-
-              Classify this CargoWise ticket:
-
-              Ticket: Entire system down, no users can log in from any device
-              Impact: All operations halted
-              Scope: Global
-              Signals: Error: "Server unreachable", no recent changes reported
-
-    expected_messages:
-      - role: assistant
-        content: |-
-          {
-            "criticalityRating": "CR1",
-            "reasoning": "Step 1: Issue prevents access to the entire application suite.
-            Step 2: Affects any user on any workstation. Step 3: Matches CR1 definition
-            exactly—no partial access or workarounds mentioned."
-          }
-
-  - id: cr-module-inaccessible
-    conversation_id: cargowise-triage
-
-    outcome: |
-      Assistant correctly classifies as 'CR2' for module-wide inaccessibility.
-      Reasoning should distinguish from CR1 by noting it's limited to one module.
-
-    execution:
-      evaluators:
-        - name: json_format_validator
-          type: code
-          script: uv run validate_output.py
-        - name: content_evaluator
-          type: llm_judge
-
-    input_messages:
-      - role: user
-        content:
-          - type: file
-            value: ../../prompts/cw-criticality-rating.prompt.md
-          - type: text
-            value: |-
-              Classify this CargoWise ticket:
-
-              Ticket: Customs module inaccessible for all users
-              Impact: Customs declarations blocked
-              Scope: Module-wide
-              Signals: "Module not found" error, other modules working
-
-    expected_messages:
-      - role: assistant
-        content: |-
-          {
-            "criticalityRating": "CR2",
-            "reasoning": "Step 1: Issue affects an entire module (Customs).
-            Step 2: No access for any user/workstation. Step 3: Does not impact
-            the full suite, so CR2 over CR1."
-          }
-```
-
 ## Notes on Examples
 
 ### File Path Conventions
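
The deleted CargoWise example above was the only one in this file that attached both a code validator and an LLM judge to individual eval cases, and per the SKILL.md diff `execution` remains an optional per-case field. For reference, the evaluator shape those removed lines documented looked like this (a sketch reconstructed from the deletion above, not from the current docs):

```yaml
# Per-case evaluator wiring as shown in the removed CargoWise example.
execution:
  evaluators:
    - name: json_format_validator   # code evaluator: shells out to a script
      type: code
      script: uv run validate_output.py
    - name: content_evaluator       # LLM judge: grades the response content
      type: llm_judge
```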
package/dist/templates/.claude/skills/agentv-prompt-optimizer/SKILL.md
CHANGED

@@ -1,13 +1,12 @@
 ---
-description: Iteratively optimize
+description: Iteratively optimize prompt files against AgentV evaluation datasets by analyzing failures and refining instructions.
 ---
 
 # AgentV Prompt Optimizer
 
-
-
-
-`prompt-optimizer <eval-path> [playbook-path]`
+## Input Variables
+- `eval-path`: Path or glob pattern to the AgentV evaluation file(s) to optimize against
+- `optimization-log-path` (optional): Path where optimization progress should be logged
 
 ## Workflow
 
@@ -17,9 +16,9 @@ Iteratively optimize a prompt file against an AgentV evaluation suite.
 - Infer prompt files from the eval file content (look for `file:` references in `input_messages` that match these patterns).
 - Recursively check referenced prompt files for *other* prompt references (dependencies).
 - If multiple prompts are found, consider ALL of them as candidates for optimization.
-- **Identify
-  - If `<
-  - If not, create a new one in the parent directory of the eval files: `
+- **Identify Optimization Log**:
+  - If `<optimization-log-path>` is provided, use it.
+  - If not, create a new one in the parent directory of the eval files: `optimization-[timestamp].md`.
 - Read content of the identified prompt file.
 
 2. **Optimization Loop** (Max 5 iterations)
@@ -38,7 +37,7 @@ Iteratively optimize a prompt file against an AgentV evaluation suite.
 - If **Score decreased**: Revert last change, try different approach.
 - If **No improvement** (2x): STOP and report stagnation.
 - **Log Result**:
-  - Append the result of this iteration to the identified
+  - Append the result of this iteration to the identified optimization log file.
 - **Format**:
   ```markdown
   ### Iteration [N]
@@ -49,7 +48,7 @@ Iteratively optimize a prompt file against an AgentV evaluation suite.
   ```
 - **Refine (The Curator)**:
   - Modify the relevant `<prompt-file>` (pick the one most likely to be the root cause) to address failures.
-  - **Strategy**: Treat the prompt as a structured
+  - **Strategy**: Treat the prompt as a structured set of rules and instructions.
  - **Clarify**: If ambiguous, make the existing instruction more specific.
  - **Add Rule**: If a constraint was missed, add a specific bullet point to the relevant section.
  - **Negative Constraint**: If hallucinating, explicitly state what NOT to do.
@@ -61,7 +60,7 @@ Iteratively optimize a prompt file against an AgentV evaluation suite.
 3. **Completion**
 - Report final score.
 - Summarize key changes made to the prompt.
-- **Finalize
+- **Finalize Optimization Log**: Add a summary header to the optimization log file indicating the session completion and final score.
 
 ## Guidelines
 - **Simplicity ("Less is More")**: Avoid adding specific rules for rare edge cases ("hotfixes"). Focus on universally applicable instructions.
package/dist/token-O4PKPL7Y.js
ADDED

@@ -0,0 +1,63 @@
+import {
+  require_token_util
+} from "./chunk-D44RV4HN.js";
+import {
+  __commonJS,
+  require_token_error
+} from "./chunk-7XYYGJAC.js";
+
+// ../../node_modules/.pnpm/@vercel+oidc@3.0.5/node_modules/@vercel/oidc/dist/token.js
+var require_token = __commonJS({
+  "../../node_modules/.pnpm/@vercel+oidc@3.0.5/node_modules/@vercel/oidc/dist/token.js"(exports, module) {
+    var __defProp = Object.defineProperty;
+    var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+    var __getOwnPropNames = Object.getOwnPropertyNames;
+    var __hasOwnProp = Object.prototype.hasOwnProperty;
+    var __export = (target, all) => {
+      for (var name in all)
+        __defProp(target, name, { get: all[name], enumerable: true });
+    };
+    var __copyProps = (to, from, except, desc) => {
+      if (from && typeof from === "object" || typeof from === "function") {
+        for (let key of __getOwnPropNames(from))
+          if (!__hasOwnProp.call(to, key) && key !== except)
+            __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+      }
+      return to;
+    };
+    var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+    var token_exports = {};
+    __export(token_exports, {
+      refreshToken: () => refreshToken
+    });
+    module.exports = __toCommonJS(token_exports);
+    var import_token_error = require_token_error();
+    var import_token_util = require_token_util();
+    async function refreshToken() {
+      const { projectId, teamId } = (0, import_token_util.findProjectInfo)();
+      let maybeToken = (0, import_token_util.loadToken)(projectId);
+      if (!maybeToken || (0, import_token_util.isExpired)((0, import_token_util.getTokenPayload)(maybeToken.token))) {
+        const authToken = (0, import_token_util.getVercelCliToken)();
+        if (!authToken) {
+          throw new import_token_error.VercelOidcTokenError(
+            "Failed to refresh OIDC token: login to vercel cli"
+          );
+        }
+        if (!projectId) {
+          throw new import_token_error.VercelOidcTokenError(
+            "Failed to refresh OIDC token: project id not found"
+          );
+        }
+        maybeToken = await (0, import_token_util.getVercelOidcToken)(authToken, projectId, teamId);
+        if (!maybeToken) {
+          throw new import_token_error.VercelOidcTokenError("Failed to refresh OIDC token");
+        }
+        (0, import_token_util.saveToken)(maybeToken, projectId);
+      }
+      process.env.VERCEL_OIDC_TOKEN = maybeToken.token;
+      return;
+    }
+  }
+});
+export default require_token();
+//# sourceMappingURL=token-O4PKPL7Y.js.map
package/dist/token-O4PKPL7Y.js.map
ADDED

@@ -0,0 +1 @@
+{"version":3,"sources":["../../../node_modules/.pnpm/@vercel+oidc@3.0.5/node_modules/@vercel/oidc/dist/token.js"],"sourcesContent":["\"use strict\";\nvar __defProp = Object.defineProperty;\nvar __getOwnPropDesc = Object.getOwnPropertyDescriptor;\nvar __getOwnPropNames = Object.getOwnPropertyNames;\nvar __hasOwnProp = Object.prototype.hasOwnProperty;\nvar __export = (target, all) => {\n  for (var name in all)\n    __defProp(target, name, { get: all[name], enumerable: true });\n};\nvar __copyProps = (to, from, except, desc) => {\n  if (from && typeof from === \"object\" || typeof from === \"function\") {\n    for (let key of __getOwnPropNames(from))\n      if (!__hasOwnProp.call(to, key) && key !== except)\n        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });\n  }\n  return to;\n};\nvar __toCommonJS = (mod) => __copyProps(__defProp({}, \"__esModule\", { value: true }), mod);\nvar token_exports = {};\n__export(token_exports, {\n  refreshToken: () => refreshToken\n});\nmodule.exports = __toCommonJS(token_exports);\nvar import_token_error = require(\"./token-error\");\nvar import_token_util = require(\"./token-util\");\nasync function refreshToken() {\n  const { projectId, teamId } = (0, import_token_util.findProjectInfo)();\n  let maybeToken = (0, import_token_util.loadToken)(projectId);\n  if (!maybeToken || (0, import_token_util.isExpired)((0, import_token_util.getTokenPayload)(maybeToken.token))) {\n    const authToken = (0, import_token_util.getVercelCliToken)();\n    if (!authToken) {\n      throw new import_token_error.VercelOidcTokenError(\n        \"Failed to refresh OIDC token: login to vercel cli\"\n      );\n    }\n    if (!projectId) {\n      throw new import_token_error.VercelOidcTokenError(\n        \"Failed to refresh OIDC token: project id not found\"\n      );\n    }\n    maybeToken = await (0, import_token_util.getVercelOidcToken)(authToken, projectId, teamId);\n    if (!maybeToken) {\n      throw new import_token_error.VercelOidcTokenError(\"Failed to refresh OIDC token\");\n    }\n    (0, import_token_util.saveToken)(maybeToken, projectId);\n  }\n  process.env.VERCEL_OIDC_TOKEN = maybeToken.token;\n  return;\n}\n// Annotate the CommonJS export names for ESM import in node:\n0 && (module.exports = {\n  refreshToken\n});\n"],"mappings":";;;;;;;;;AAAA;AAAA;AACA,QAAI,YAAY,OAAO;AACvB,QAAI,mBAAmB,OAAO;AAC9B,QAAI,oBAAoB,OAAO;AAC/B,QAAI,eAAe,OAAO,UAAU;AACpC,QAAI,WAAW,CAAC,QAAQ,QAAQ;AAC9B,eAAS,QAAQ;AACf,kBAAU,QAAQ,MAAM,EAAE,KAAK,IAAI,IAAI,GAAG,YAAY,KAAK,CAAC;AAAA,IAChE;AACA,QAAI,cAAc,CAAC,IAAI,MAAM,QAAQ,SAAS;AAC5C,UAAI,QAAQ,OAAO,SAAS,YAAY,OAAO,SAAS,YAAY;AAClE,iBAAS,OAAO,kBAAkB,IAAI;AACpC,cAAI,CAAC,aAAa,KAAK,IAAI,GAAG,KAAK,QAAQ;AACzC,sBAAU,IAAI,KAAK,EAAE,KAAK,MAAM,KAAK,GAAG,GAAG,YAAY,EAAE,OAAO,iBAAiB,MAAM,GAAG,MAAM,KAAK,WAAW,CAAC;AAAA,MACvH;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,CAAC,QAAQ,YAAY,UAAU,CAAC,GAAG,cAAc,EAAE,OAAO,KAAK,CAAC,GAAG,GAAG;AACzF,QAAI,gBAAgB,CAAC;AACrB,aAAS,eAAe;AAAA,MACtB,cAAc,MAAM;AAAA,IACtB,CAAC;AACD,WAAO,UAAU,aAAa,aAAa;AAC3C,QAAI,qBAAqB;AACzB,QAAI,oBAAoB;AACxB,mBAAe,eAAe;AAC5B,YAAM,EAAE,WAAW,OAAO,KAAK,GAAG,kBAAkB,iBAAiB;AACrE,UAAI,cAAc,GAAG,kBAAkB,WAAW,SAAS;AAC3D,UAAI,CAAC,eAAe,GAAG,kBAAkB,YAAY,GAAG,kBAAkB,iBAAiB,WAAW,KAAK,CAAC,GAAG;AAC7G,cAAM,aAAa,GAAG,kBAAkB,mBAAmB;AAC3D,YAAI,CAAC,WAAW;AACd,gBAAM,IAAI,mBAAmB;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AACA,YAAI,CAAC,WAAW;AACd,gBAAM,IAAI,mBAAmB;AAAA,YAC3B;AAAA,UACF;AAAA,QACF;AACA,qBAAa,OAAO,GAAG,kBAAkB,oBAAoB,WAAW,WAAW,MAAM;AACzF,YAAI,CAAC,YAAY;AACf,gBAAM,IAAI,mBAAmB,qBAAqB,8BAA8B;AAAA,QAClF;AACA,SAAC,GAAG,kBAAkB,WAAW,YAAY,SAAS;AAAA,MACxD;AACA,cAAQ,IAAI,oBAAoB,WAAW;AAC3C;AAAA,IACF;AAAA;AAAA;","names":[]}
package/dist/token-util-SOXXDYPK.js.map
ADDED

@@ -0,0 +1 @@
+{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "agentv",
-  "version": "0.13.0",
+  "version": "0.15.0",
   "description": "CLI entry point for AgentV",
   "type": "module",
   "repository": {
@@ -24,7 +24,7 @@
     "dotenv": "^16.4.5",
     "micromatch": "^4.0.8",
     "yaml": "^2.6.1",
-    "@agentv/core": "0.13.0"
+    "@agentv/core": "0.15.0"
   },
   "devDependencies": {
     "execa": "^9.3.0"