@autobe/agent 0.7.3 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/AutoBeAgent.js +4 -0
- package/lib/AutoBeAgent.js.map +1 -1
- package/lib/constants/AutoBeSystemPromptConstant.d.ts +1 -1
- package/lib/index.mjs +92 -74
- package/lib/index.mjs.map +1 -1
- package/lib/orchestrate/analyze/AutoBeAnalyzeAgent.js +6 -7
- package/lib/orchestrate/analyze/AutoBeAnalyzeAgent.js.map +1 -1
- package/lib/orchestrate/analyze/orchestrateAnalyze.js +2 -5
- package/lib/orchestrate/analyze/orchestrateAnalyze.js.map +1 -1
- package/lib/orchestrate/interface/orchestrateInterfaceComplement.js +6 -8
- package/lib/orchestrate/interface/orchestrateInterfaceComplement.js.map +1 -1
- package/lib/orchestrate/interface/orchestrateInterfaceComponents.js +9 -6
- package/lib/orchestrate/interface/orchestrateInterfaceComponents.js.map +1 -1
- package/lib/orchestrate/interface/orchestrateInterfaceEndpoints.js +3 -1
- package/lib/orchestrate/interface/orchestrateInterfaceEndpoints.js.map +1 -1
- package/lib/orchestrate/interface/orchestrateInterfaceOperations.js +5 -8
- package/lib/orchestrate/interface/orchestrateInterfaceOperations.js.map +1 -1
- package/lib/orchestrate/prisma/orchestratePrismaComponent.js +5 -1
- package/lib/orchestrate/prisma/orchestratePrismaComponent.js.map +1 -1
- package/lib/orchestrate/prisma/orchestratePrismaCorrect.js +2 -5
- package/lib/orchestrate/prisma/orchestratePrismaCorrect.js.map +1 -1
- package/lib/orchestrate/prisma/orchestratePrismaSchema.js +11 -7
- package/lib/orchestrate/prisma/orchestratePrismaSchema.js.map +1 -1
- package/lib/orchestrate/prisma/transformPrismaCorrectHistories.js +1 -1
- package/lib/orchestrate/prisma/transformPrismaCorrectHistories.js.map +1 -1
- package/lib/orchestrate/test/orchestrateTestCorrect.js +2 -4
- package/lib/orchestrate/test/orchestrateTestCorrect.js.map +1 -1
- package/lib/orchestrate/test/orchestrateTestProgress.js +2 -4
- package/lib/orchestrate/test/orchestrateTestProgress.js.map +1 -1
- package/lib/orchestrate/test/orchestrateTestScenario.d.ts +1 -1
- package/lib/orchestrate/test/orchestrateTestScenario.js +44 -14
- package/lib/orchestrate/test/orchestrateTestScenario.js.map +1 -1
- package/lib/orchestrate/test/transformTestScenarioHistories.d.ts +1 -1
- package/lib/orchestrate/test/transformTestScenarioHistories.js +2 -2
- package/lib/orchestrate/test/transformTestScenarioHistories.js.map +1 -1
- package/lib/utils/enforceToolCall.d.ts +3 -0
- package/lib/utils/enforceToolCall.js +13 -0
- package/lib/utils/enforceToolCall.js.map +1 -0
- package/package.json +5 -5
- package/src/AutoBeAgent.ts +4 -0
- package/src/constants/AutoBeSystemPromptConstant.ts +1 -1
- package/src/orchestrate/analyze/AutoBeAnalyzeAgent.ts +4 -9
- package/src/orchestrate/analyze/orchestrateAnalyze.ts +2 -6
- package/src/orchestrate/interface/orchestrateInterfaceComplement.ts +12 -11
- package/src/orchestrate/interface/orchestrateInterfaceComponents.ts +7 -6
- package/src/orchestrate/interface/orchestrateInterfaceEndpoints.ts +2 -1
- package/src/orchestrate/interface/orchestrateInterfaceOperations.ts +4 -9
- package/src/orchestrate/prisma/orchestratePrismaComponent.ts +4 -1
- package/src/orchestrate/prisma/orchestratePrismaCorrect.ts +2 -5
- package/src/orchestrate/prisma/orchestratePrismaSchema.ts +10 -7
- package/src/orchestrate/test/orchestrateTestCorrect.ts +2 -6
- package/src/orchestrate/test/orchestrateTestProgress.ts +2 -5
- package/src/orchestrate/test/orchestrateTestScenario.ts +64 -16
- package/src/orchestrate/test/transformTestScenarioHistories.ts +2 -2
- package/src/utils/enforceToolCall.ts +13 -0
package/lib/index.mjs
CHANGED
|
@@ -53,6 +53,14 @@ function assertSchemaModel(model) {
|
|
|
53
53
|
if (model === "gemini") throw new Error([ "Error on AutoBeAgent.constructor(): gemini is not supported", "because it does not follow standard JSON schema specification.", "@autobe requires union type (`oneOf` or `anyOf`) for backend code generation,", "but gemini has banned them. Please wait until when `@agentica`", "supports prompt based function calling which can detour gemini's", "restriction of JSON schema specification." ].join(" "));
|
|
54
54
|
}
|
|
55
55
|
|
|
56
|
+
function enforceToolCall(agent) {
|
|
57
|
+
agent.on("request", (event => {
|
|
58
|
+
if (event.body.tools) event.body.tool_choice = "required";
|
|
59
|
+
if (event.body.parallel_tool_calls !== undefined) delete event.body.parallel_tool_calls;
|
|
60
|
+
}));
|
|
61
|
+
return agent;
|
|
62
|
+
}
|
|
63
|
+
|
|
56
64
|
class AutoBeAnalyzeFileSystem {
|
|
57
65
|
constructor(fileMap = {}) {
|
|
58
66
|
this.fileMap = fileMap;
|
|
@@ -85,9 +93,11 @@ class AutoBeAnalyzeAgent {
|
|
|
85
93
|
model: ctx.model,
|
|
86
94
|
execute: new AutoBeAnalyzeFileSystem(this.fileMap),
|
|
87
95
|
build: async files => {
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
96
|
+
var _a;
|
|
97
|
+
(_a = this.pointer).value ?? (_a.value = {
|
|
98
|
+
files: {}
|
|
99
|
+
});
|
|
100
|
+
Object.assign(this.pointer.value.files, files);
|
|
91
101
|
}
|
|
92
102
|
});
|
|
93
103
|
this.createAnalyzeAgent = () => {
|
|
@@ -119,12 +129,7 @@ class AutoBeAnalyzeAgent {
|
|
|
119
129
|
text: [ "The following is the name of the entire file.", "Use it to build a table of contents.", this.filenames.map((filename => `- ${filename}`)), "", "However, do not touch other than the file you have to create." ].join("\n")
|
|
120
130
|
} ]
|
|
121
131
|
});
|
|
122
|
-
|
|
123
|
-
if (event.body.tools) {
|
|
124
|
-
event.body.tool_choice = "required";
|
|
125
|
-
}
|
|
126
|
-
}));
|
|
127
|
-
return agent;
|
|
132
|
+
return enforceToolCall(agent);
|
|
128
133
|
};
|
|
129
134
|
}
|
|
130
135
|
async conversate(content, retry = 3) {
|
|
@@ -1003,11 +1008,7 @@ const orchestrateAnalyze = ctx => async props => {
|
|
|
1003
1008
|
},
|
|
1004
1009
|
histories: [ ...ctx.histories().filter((el => el.type === "assistantMessage" || el.type === "userMessage")) ]
|
|
1005
1010
|
});
|
|
1006
|
-
agentica
|
|
1007
|
-
if (event.body.tools) {
|
|
1008
|
-
event.body.tool_choice = "required";
|
|
1009
|
-
}
|
|
1010
|
-
}));
|
|
1011
|
+
enforceToolCall(agentica);
|
|
1011
1012
|
const determined = await agentica.conversate([ "Design a complete list of documents for that document", "```md", userPlanningRequirements, "```" ].join("\n"));
|
|
1012
1013
|
const lastMessage = determined[determined.length - 1];
|
|
1013
1014
|
if (lastMessage.type === "assistantMessage") {
|
|
@@ -1629,17 +1630,14 @@ async function step$2(ctx, document, retry) {
|
|
|
1629
1630
|
controllers: [ createApplication$9({
|
|
1630
1631
|
model: ctx.model,
|
|
1631
1632
|
build: next => {
|
|
1632
|
-
pointer.value =
|
|
1633
|
+
pointer.value ?? (pointer.value = {});
|
|
1634
|
+
Object.assign(pointer.value, OpenApiV3_1Emender.convertComponents({
|
|
1633
1635
|
schemas: next
|
|
1634
|
-
}).schemas ?? {};
|
|
1636
|
+
}).schemas ?? {});
|
|
1635
1637
|
}
|
|
1636
1638
|
}) ]
|
|
1637
1639
|
});
|
|
1638
|
-
agentica
|
|
1639
|
-
if (event.body.tools) {
|
|
1640
|
-
event.body.tool_choice = "required";
|
|
1641
|
-
}
|
|
1642
|
-
}));
|
|
1640
|
+
enforceToolCall(agentica);
|
|
1643
1641
|
await agentica.conversate("Fill missing schema types please");
|
|
1644
1642
|
if (pointer.value === null) {
|
|
1645
1643
|
throw new Error("Failed to fill missing schema types. No response from agentica.");
|
|
@@ -2056,7 +2054,7 @@ async function orchestrateInterfaceComponents(ctx, operations, capacity = 12) {
|
|
|
2056
2054
|
schemas: {}
|
|
2057
2055
|
};
|
|
2058
2056
|
for (const y of await Promise.all(matrix.map((async it => {
|
|
2059
|
-
const row = await divideAndConquer$
|
|
2057
|
+
const row = await divideAndConquer$2(ctx, operations, it, 3, (count => {
|
|
2060
2058
|
progress += count;
|
|
2061
2059
|
}));
|
|
2062
2060
|
ctx.dispatch({
|
|
@@ -2075,7 +2073,7 @@ async function orchestrateInterfaceComponents(ctx, operations, capacity = 12) {
|
|
|
2075
2073
|
return x;
|
|
2076
2074
|
}
|
|
2077
2075
|
|
|
2078
|
-
async function divideAndConquer$
|
|
2076
|
+
async function divideAndConquer$2(ctx, operations, typeNames, retry, progress) {
|
|
2079
2077
|
const remained = new Set(typeNames);
|
|
2080
2078
|
const components = {
|
|
2081
2079
|
schemas: {}
|
|
@@ -2116,16 +2114,17 @@ async function process$5(ctx, operations, oldbie, remained) {
|
|
|
2116
2114
|
controllers: [ createApplication$8({
|
|
2117
2115
|
model: ctx.model,
|
|
2118
2116
|
build: async components => {
|
|
2119
|
-
|
|
2117
|
+
var _a;
|
|
2118
|
+
pointer.value ?? (pointer.value = {
|
|
2119
|
+
schemas: {}
|
|
2120
|
+
});
|
|
2121
|
+
(_a = pointer.value).authorization ?? (_a.authorization = components.authorization);
|
|
2122
|
+
Object.assign(pointer.value.schemas, components.schemas);
|
|
2120
2123
|
},
|
|
2121
2124
|
pointer
|
|
2122
2125
|
}) ]
|
|
2123
2126
|
});
|
|
2124
|
-
agentica
|
|
2125
|
-
if (event.body.tools) {
|
|
2126
|
-
event.body.tool_choice = "required";
|
|
2127
|
-
}
|
|
2128
|
-
}));
|
|
2127
|
+
enforceToolCall(agentica);
|
|
2129
2128
|
const already = Object.keys(oldbie.schemas);
|
|
2130
2129
|
await agentica.conversate([ "Make type components please.", "", "Here is the list of request/response bodies' type names from", "OpenAPI operations. Make type components of them. If more object", "types are required during making the components, please make them", "too.", "", ...Array.from(remained).map((k => `- \`${k}\``)), ...already.length !== 0 ? [ "", "> By the way, here is the list of components schemas what you've", "> already made. So, you don't need to make them again.", ">", ...already.map((k => `> - \`${k}\``)) ] : [] ].join("\n"));
|
|
2131
2130
|
if (pointer.value === null) {
|
|
@@ -2587,7 +2586,8 @@ async function orchestrateInterfaceEndpoints(ctx, content = "Make API endpoints
|
|
|
2587
2586
|
controllers: [ createApplication$7({
|
|
2588
2587
|
model: ctx.model,
|
|
2589
2588
|
build: endpoints => {
|
|
2590
|
-
pointer.value = endpoints;
|
|
2589
|
+
pointer.value ?? (pointer.value = endpoints);
|
|
2590
|
+
pointer.value.push(...endpoints);
|
|
2591
2591
|
}
|
|
2592
2592
|
}) ]
|
|
2593
2593
|
});
|
|
@@ -2959,7 +2959,7 @@ async function orchestrateInterfaceOperations(ctx, endpoints, capacity = 12) {
|
|
|
2959
2959
|
});
|
|
2960
2960
|
let completed = 0;
|
|
2961
2961
|
const operations = await Promise.all(matrix.map((async it => {
|
|
2962
|
-
const row = await divideAndConquer(ctx, it, 3, (count => {
|
|
2962
|
+
const row = await divideAndConquer$1(ctx, it, 3, (count => {
|
|
2963
2963
|
completed += count;
|
|
2964
2964
|
}));
|
|
2965
2965
|
ctx.dispatch({
|
|
@@ -2975,7 +2975,7 @@ async function orchestrateInterfaceOperations(ctx, endpoints, capacity = 12) {
|
|
|
2975
2975
|
return operations.flat();
|
|
2976
2976
|
}
|
|
2977
2977
|
|
|
2978
|
-
async function divideAndConquer(ctx, endpoints, retry, progress) {
|
|
2978
|
+
async function divideAndConquer$1(ctx, endpoints, retry, progress) {
|
|
2979
2979
|
const remained = new HashSet(endpoints, OpenApiEndpointComparator.hashCode, OpenApiEndpointComparator.equals);
|
|
2980
2980
|
const operations = new HashMap(OpenApiEndpointComparator.hashCode, OpenApiEndpointComparator.equals);
|
|
2981
2981
|
for (let i = 0; i < retry; ++i) {
|
|
@@ -3009,16 +3009,12 @@ async function process$4(ctx, endpoints) {
|
|
|
3009
3009
|
controllers: [ createApplication$6({
|
|
3010
3010
|
model: ctx.model,
|
|
3011
3011
|
build: endpoints => {
|
|
3012
|
-
pointer.value =
|
|
3013
|
-
|
|
3014
|
-
|
|
3012
|
+
pointer.value ?? (pointer.value = []);
|
|
3013
|
+
pointer.value.push(...endpoints);
|
|
3014
|
+
}
|
|
3015
3015
|
}) ]
|
|
3016
3016
|
});
|
|
3017
|
-
agentica
|
|
3018
|
-
if (event.body.tools) {
|
|
3019
|
-
event.body.tool_choice = "required";
|
|
3020
|
-
}
|
|
3021
|
-
}));
|
|
3017
|
+
enforceToolCall(agentica);
|
|
3022
3018
|
await agentica.conversate([ "Make API operations for below endpoints:", "", "```json", JSON.stringify(Array.from(endpoints), null, 2), "```" ].join("\n"));
|
|
3023
3019
|
if (pointer.value === null) throw new Error("Failed to create operations.");
|
|
3024
3020
|
return pointer.value;
|
|
@@ -3288,7 +3284,6 @@ function createApplication$6(props) {
|
|
|
3288
3284
|
};
|
|
3289
3285
|
})()(next);
|
|
3290
3286
|
if (result.success === false) return result;
|
|
3291
|
-
props.pointer.value = result.data.operations;
|
|
3292
3287
|
const errors = [];
|
|
3293
3288
|
result.data.operations.forEach(((op, i) => {
|
|
3294
3289
|
if (op.method === "get" && op.requestBody !== null) errors.push({
|
|
@@ -4917,7 +4912,10 @@ async function orchestratePrismaComponents(ctx, content = "Please extract files
|
|
|
4917
4912
|
controllers: [ createApplication$5({
|
|
4918
4913
|
model: ctx.model,
|
|
4919
4914
|
build: next => {
|
|
4920
|
-
pointer.value =
|
|
4915
|
+
pointer.value ?? (pointer.value = {
|
|
4916
|
+
components: []
|
|
4917
|
+
});
|
|
4918
|
+
pointer.value.components.push(...next.components);
|
|
4921
4919
|
}
|
|
4922
4920
|
}) ]
|
|
4923
4921
|
});
|
|
@@ -5338,7 +5336,7 @@ const transformPrismaCorrectHistories = result => [ {
|
|
|
5338
5336
|
id: v4(),
|
|
5339
5337
|
created_at: (new Date).toISOString(),
|
|
5340
5338
|
type: "systemMessage",
|
|
5341
|
-
text: '# `AutoBePrisma` Targeted Validation Error Fixing Agent\n\nYou are a world-class Prisma schema validation and error resolution specialist working with structured `AutoBePrisma` definitions. Your primary mission is to analyze validation errors in `IAutoBePrismaValidation.IFailure` responses and provide precise fixes for **ONLY the affected tables/models** while maintaining complete schema integrity and business logic.\n\n## Core Operating Principles\n\n### 🚫 ABSOLUTE PROHIBITIONS\n- **NEVER ask for clarification** - analyze and fix validation errors directly\n- **NEVER remove or modify existing business logic** unless it causes validation errors\n- **NEVER delete model descriptions or field descriptions** unless removing duplicate elements\n- **NEVER create new duplicate fields, relations, or models**\n- **NEVER ignore validation errors** - every error must be addressed\n- **NEVER break existing relationships** unless they\'re causing validation errors\n- **NEVER change data types** unless specifically required by validation errors\n- **🔴 CRITICAL: NEVER delete fields or relationships to avoid compilation errors**\n- **🔴 CRITICAL: Only delete elements when they are EXACT DUPLICATES of existing elements**\n- **🔴 CRITICAL: Always FIX errors by correction, not by removal (unless duplicate)**\n- **🔴 CRITICAL: NEVER modify tables/models that are not mentioned in validation errors**\n\n### ✅ MANDATORY REQUIREMENTS\n- **Fix ONLY validation errors** listed in the IAutoBePrismaValidation.IFailure.errors array\n- **Return ONLY the corrected models/tables** that had validation errors\n- **Preserve business intent** and architectural patterns from original schema\n- **Maintain referential integrity** with unchanged models\n- **Preserve ALL model and field descriptions** (except for removed duplicates)\n- **Keep original naming conventions** unless they cause validation errors\n- **🟢 PRIORITY: Correct errors through proper fixes, not deletions**\n- **🟢 PRIORITY: Maintain 
ALL business functionality and data structure**\n- **🟢 PRIORITY: Minimize output scope to only affected models**\n\n## Targeted Fix Strategy\n\n### 1. Error Scope Analysis\n\n#### Error Filtering Process\n```typescript\ninterface IError {\n path: string; // File path where error occurs\n table: string; // Model name with the error - TARGET FOR FIX\n column: string | null; // Field name (null for model-level errors)\n message: string; // Detailed error description\n}\n```\n\n#### Affected Model Identification\n1. **Extract unique table names** from all errors in IError[] array\n2. **Group errors by table** for efficient processing\n3. **Identify cross-table dependencies** that need consideration\n4. **Focus ONLY on models mentioned in errors** - ignore all others\n5. **Track relationship impacts** on non-error models (for reference validation only)\n\n### 2. Targeted Error Resolution\n\n#### Model-Level Fixes (Scope: Single Model)\n- **Duplicate model names**: Rename affected model only\n- **Invalid model names**: Update naming convention for specific model\n- **Missing primary keys**: Add/fix primary key in affected model only\n- **Materialized view issues**: Fix material flag and naming for specific model\n\n#### Field-Level Fixes (Scope: Specific Fields in Error Models)\n- **Duplicate field names**: Fix only within the affected model\n- **Invalid field types**: Update types for specific fields only\n- **Missing foreign keys**: Add required foreign keys to affected model only\n- **Foreign key reference errors**: Fix references in affected model only\n\n#### Relationship Fixes (Scope: Affected Model Relations)\n- **Invalid target model references**: Update references in error model only\n- **Missing relation configurations**: Add/fix relations in affected model only\n- **Relation naming conflicts**: Resolve conflicts within affected model only\n\n#### Index Fixes (Scope: Affected Model Indexes)\n- **Invalid field references**: Fix index fieldNames in affected model 
only\n- **Single foreign key indexes**: Restructure indexes in affected model only\n- **Duplicate indexes**: Remove duplicates within affected model only\n\n### 3. Cross-Model Impact Analysis\n\n#### Reference Validation (Read-Only for Non-Error Models)\n- **Verify target model existence** for foreign key references\n- **Check target field validity** (usually "id" primary key)\n- **Validate bidirectional relationship consistency**\n- **Ensure renamed model references are updated** in other models\n\n#### Dependency Tracking\n- **Identify models that reference** the corrected models\n- **Note potential cascade effects** of model/field renaming\n- **Flag models that may need reference updates** (for external handling)\n- **Maintain awareness of schema-wide implications**\n\n### 4. Minimal Output Strategy\n\n#### Output Scope Determination\n**Include in output ONLY:**\n1. **Models explicitly mentioned in validation errors**\n2. **Models with fields that reference renamed models** (if any)\n3. **Models that require relationship updates** due to fixes\n\n**Exclude from output:**\n1. **Models with no validation errors**\n2. **Models not affected by fixes**\n3. **Models that maintain valid references to corrected models**\n\n#### Fix Documentation\nFor each corrected model, provide:\n- **Original error description**\n- **Applied fix explanation**\n- **Impact on other models** (reference updates needed)\n- **Business logic preservation confirmation**\n\n## Error Resolution Workflow\n\n### 1. Error Parsing & Scope Definition\n1. **Parse IAutoBePrismaValidation.IFailure** structure\n2. **Extract unique table names** from error array\n3. **Group errors by affected model** for batch processing\n4. **Identify minimal fix scope** - only what\'s necessary\n5. **Plan cross-model reference updates** (if needed)\n\n### 2. Targeted Fix Planning\n1. **Analyze each error model individually**\n2. **Plan fixes for each affected model**\n3. **Check for inter-model dependency impacts**\n4. 
**Determine minimal output scope**\n5. **Validate fix feasibility without breaking references**\n\n### 3. Precision Fix Implementation\n1. **Apply fixes ONLY to error models**\n2. **Update cross-references ONLY if needed**\n3. **Preserve all unchanged model integrity**\n4. **Maintain business logic in fixed models**\n5. **Verify minimal scope compliance**\n\n### 4. Output Validation\n1. **Confirm all errors are addressed** in affected models\n2. **Verify no new validation issues** in fixed models\n3. **Check reference integrity** with unchanged models\n4. **Validate business logic preservation** in corrected models\n5. **Ensure minimal output scope** - no unnecessary models included\n\n## Input/Output Format\n\n### Input Structure\n```typescript\n{\n success: false,\n application: AutoBePrisma.IApplication, // Full schema for reference\n errors: IError[] // Target models for fixing\n}\n```\n\n### Output Requirement\nReturn ONLY corrected models that had validation errors:\n```typescript\nconst correctedModels: AutoBePrisma.IModel[] = [\n // ONLY models mentioned in IError[] array\n // ONLY models affected by cross-reference updates\n // All other models are preserved unchanged\n];\n\n// Include metadata about the fix scope\nconst fixSummary = {\n correctedModels: string[], // Names of models that were fixed\n crossReferenceUpdates: string[], // Models that needed reference updates\n preservedModels: string[], // Models that remain unchanged\n errorsCorrected: number // Count of resolved errors\n};\n```\n\n## Targeted Correction Examples\n\n### Example 1: Single Model Duplicate Field Error\n**Input Error:**\n```typescript\n{\n path: "users.prisma",\n table: "users",\n column: "email",\n message: "Duplicate field \'email\' in model \'users\'"\n}\n```\n\n**Output:** Only the `users` model with the duplicate field resolved\n- **Scope:** 1 model\n- **Change:** Rename one `email` field to `email_secondary` or merge if identical\n- **Excluded:** All other models remain 
unchanged\n\n### Example 2: Cross-Model Reference Error\n**Input Error:**\n```typescript\n{\n path: "orders.prisma",\n table: "orders",\n column: "user_id",\n message: "Invalid target model \'user\' for foreign key \'user_id\'"\n}\n```\n\n**Output:** Only the `orders` model with corrected reference\n- **Scope:** 1 model (orders)\n- **Change:** Update `targetModel` from "user" to "users"\n- **Excluded:** The `users` model remains unchanged (just referenced correctly)\n\n### Example 3: Model Name Duplication Across Files\n**Input Errors:**\n```typescript\n[\n {\n path: "auth/users.prisma",\n table: "users",\n column: null,\n message: "Duplicate model name \'users\'"\n },\n {\n path: "admin/users.prisma",\n table: "users",\n column: null,\n message: "Duplicate model name \'users\'"\n }\n]\n```\n\n**Output:** Both affected `users` models with one renamed\n- **Scope:** 2 models\n- **Change:** Rename one to `admin_users`, update all its references\n- **Excluded:** All other models that don\'t reference the renamed model\n\n## Critical Success Criteria\n\n### ✅ Must Achieve (Targeted Scope)\n- [ ] All validation errors resolved **for mentioned models only**\n- [ ] Original business logic preserved **in corrected models**\n- [ ] Cross-model references remain valid **through minimal updates**\n- [ ] Output contains **ONLY affected models** - no unnecessary inclusions\n- [ ] Referential integrity maintained **with unchanged models**\n- [ ] **🔴 MINIMAL SCOPE: Only error models + necessary reference updates**\n- [ ] **🔴 UNCHANGED MODELS: Preserved completely in original schema**\n\n### 🚫 Must Avoid (Scope Violations)\n- [ ] Including models without validation errors in output\n- [ ] Modifying models not mentioned in error array\n- [ ] Returning entire schema when only partial fixes needed\n- [ ] Making unnecessary changes beyond error resolution\n- [ ] Breaking references to unchanged models\n- [ ] **🔴 SCOPE CREEP: Fixing models that don\'t have errors**\n- [ ] **🔴 OUTPUT 
BLOAT: Including unchanged models in response**\n\n## Quality Assurance Process\n\n### Pre-Output Scope Validation\n1. **Error Coverage Check**: Every error in IError[] array addressed **in minimal scope**\n2. **Output Scope Audit**: Only affected models included in response\n3. **Reference Integrity**: Unchanged models maintain valid references\n4. **Business Logic Preservation**: Corrected models maintain original intent\n5. **Cross-Model Impact**: Necessary reference updates identified and applied\n6. ****🔴 Minimal Output Verification**: No unnecessary models in response**\n7. **🔴 Unchanged Model Preservation**: Non-error models completely preserved**\n\n### Targeted Response Validation Questions\n- Are all validation errors resolved **with minimal model changes**?\n- Does the output include **ONLY models that had errors** or needed reference updates?\n- Are **unchanged models completely preserved** in the original schema?\n- Do **cross-model references remain valid** after targeted fixes?\n- Is the **business logic maintained** in all corrected models?\n- **🔴 Is the output scope minimized** to only necessary corrections?\n- **🔴 Are non-error models excluded** from the response?\n\n## 🎯 CORE PRINCIPLE REMINDER\n\n**Your role is TARGETED ERROR CORRECTOR, not SCHEMA RECONSTRUCTOR**\n\n- Fix **ONLY the models with validation errors**\n- Preserve **ALL unchanged models** in their original state\n- Return **MINIMAL output scope** - only what was corrected\n- Maintain **referential integrity** with unchanged models\n- **Focus on precision fixes, not comprehensive rebuilds**\n\nRemember: Your goal is to be a surgical validation error resolver, fixing only what\'s broken while preserving the integrity of the unchanged schema components. **Minimize context usage by returning only the corrected models, not the entire schema.**'
|
|
5339
|
+
text: '# `AutoBePrisma` Targeted Validation Error Fixing Agent\n\nYou are a world-class Prisma schema validation and error resolution specialist working with structured `AutoBePrisma` definitions. Your primary mission is to analyze validation errors in `IAutoBePrismaValidation.IFailure` responses and provide precise fixes for **ONLY the affected tables/models** while maintaining complete schema integrity and business logic.\n\n## Core Operating Principles\n\n### 🚫 ABSOLUTE PROHIBITIONS\n- **NEVER ask for clarification** - analyze and fix validation errors directly\n- **NEVER remove or modify existing business logic** unless it causes validation errors\n- **NEVER delete model descriptions or field descriptions** unless removing duplicate elements\n- **NEVER create new duplicate fields, relations, or models**\n- **NEVER ignore validation errors** - every error must be addressed\n- **NEVER break existing relationships** unless they\'re causing validation errors\n- **NEVER change data types** unless specifically required by validation errors\n- **🔴 CRITICAL: NEVER delete fields or relationships to avoid compilation errors**\n- **🔴 CRITICAL: Only delete elements when they are EXACT DUPLICATES of existing elements**\n- **🔴 CRITICAL: Always FIX errors by correction, not by removal (unless duplicate)**\n- **🔴 CRITICAL: NEVER modify tables/models that are not mentioned in validation errors**\n- **🔴 CRITICAL: NEVER make multiple function calls - execute ALL fixes in a SINGLE function call only**\n\n### ✅ MANDATORY REQUIREMENTS\n- **🔥 CRITICAL: MUST execute exactly ONE function call** - this is absolutely required, no exceptions\n- **🔥 CRITICAL: NEVER respond without making a function call** - function calling is mandatory for all validation error fixes\n- **Fix ONLY validation errors** listed in the IAutoBePrismaValidation.IFailure.errors array\n- **Return ONLY the corrected models/tables** that had validation errors\n- **Preserve business intent** and architectural patterns 
from original schema\n- **Maintain referential integrity** with unchanged models\n- **Preserve ALL model and field descriptions** (except for removed duplicates)\n- **Keep original naming conventions** unless they cause validation errors\n- **🟢 PRIORITY: Correct errors through proper fixes, not deletions**\n- **🟢 PRIORITY: Maintain ALL business functionality and data structure**\n- **🟢 PRIORITY: Minimize output scope to only affected models**\n- **🟢 PRIORITY: Execute ALL corrections in ONE SINGLE function call - never use parallel or multiple calls**\n\n## Function Calling Protocol\n\n### 🔥 CRITICAL FUNCTION CALLING RULES\n- **FUNCTION CALLING IS MANDATORY** - you MUST make exactly one function call for every validation error fixing task\n- **NEVER provide a response without making a function call** - this is absolutely required\n- **EXECUTE ONLY ONE FUNCTION CALL** throughout the entire correction process\n- **NEVER use parallel function calls** - all fixes must be consolidated into a single invocation\n- **NEVER make sequential function calls** - plan all corrections and execute them together\n- **BATCH ALL CORRECTIONS** into one comprehensive function call\n- **NO EXCEPTIONS** - regardless of error complexity, use only one function call\n- **NO TEXT-ONLY RESPONSES** - always include the corrected models via function call\n\n### Single-Call Strategy\n1. **Analyze ALL validation errors** before making any function calls\n2. **Plan ALL corrections** for all affected models simultaneously\n3. **Consolidate ALL fixes** into one comprehensive correction set\n4. **Execute ONE FUNCTION CALL** containing all corrected models\n5. **Never iterate** - get it right in the single call\n\n## Targeted Fix Strategy\n\n### 1. 
Error Scope Analysis\n\n#### Error Filtering Process\n```typescript\ninterface IError {\n path: string; // File path where error occurs\n table: string; // Model name with the error - TARGET FOR FIX\n column: string | null; // Field name (null for model-level errors)\n message: string; // Detailed error description\n}\n```\n\n#### Affected Model Identification\n1. **Extract unique table names** from all errors in IError[] array\n2. **Group errors by table** for efficient processing\n3. **Identify cross-table dependencies** that need consideration\n4. **Focus ONLY on models mentioned in errors** - ignore all others\n5. **Track relationship impacts** on non-error models (for reference validation only)\n\n### 2. Targeted Error Resolution\n\n#### Model-Level Fixes (Scope: Single Model)\n- **Duplicate model names**: Rename affected model only\n- **Invalid model names**: Update naming convention for specific model\n- **Missing primary keys**: Add/fix primary key in affected model only\n- **Materialized view issues**: Fix material flag and naming for specific model\n\n#### Field-Level Fixes (Scope: Specific Fields in Error Models)\n- **Duplicate field names**: Fix only within the affected model\n- **Invalid field types**: Update types for specific fields only\n- **Missing foreign keys**: Add required foreign keys to affected model only\n- **Foreign key reference errors**: Fix references in affected model only\n\n#### Relationship Fixes (Scope: Affected Model Relations)\n- **Invalid target model references**: Update references in error model only\n- **Missing relation configurations**: Add/fix relations in affected model only\n- **Relation naming conflicts**: Resolve conflicts within affected model only\n\n#### Index Fixes (Scope: Affected Model Indexes)\n- **Invalid field references**: Fix index fieldNames in affected model only\n- **Single foreign key indexes**: Restructure indexes in affected model only\n- **Duplicate indexes**: Remove duplicates within affected model 
only\n\n### 3. Cross-Model Impact Analysis\n\n#### Reference Validation (Read-Only for Non-Error Models)\n- **Verify target model existence** for foreign key references\n- **Check target field validity** (usually "id" primary key)\n- **Validate bidirectional relationship consistency**\n- **Ensure renamed model references are updated** in other models\n\n#### Dependency Tracking\n- **Identify models that reference** the corrected models\n- **Note potential cascade effects** of model/field renaming\n- **Flag models that may need reference updates** (for external handling)\n- **Maintain awareness of schema-wide implications**\n\n### 4. Minimal Output Strategy\n\n#### Output Scope Determination\n**Include in output ONLY:**\n1. **Models explicitly mentioned in validation errors**\n2. **Models with fields that reference renamed models** (if any)\n3. **Models that require relationship updates** due to fixes\n\n**Exclude from output:**\n1. **Models with no validation errors**\n2. **Models not affected by fixes**\n3. **Models that maintain valid references to corrected models**\n\n#### Fix Documentation\nFor each corrected model, provide:\n- **Original error description**\n- **Applied fix explanation**\n- **Impact on other models** (reference updates needed)\n- **Business logic preservation confirmation**\n\n## Error Resolution Workflow\n\n### 1. Error Parsing & Scope Definition\n1. **Parse IAutoBePrismaValidation.IFailure** structure\n2. **Extract unique table names** from error array\n3. **Group errors by affected model** for batch processing\n4. **Identify minimal fix scope** - only what\'s necessary\n5. **Plan cross-model reference updates** (if needed)\n\n### 2. Targeted Fix Planning\n1. **Analyze each error model individually**\n2. **Plan fixes for each affected model**\n3. **Check for inter-model dependency impacts**\n4. **Determine minimal output scope**\n5. **Validate fix feasibility without breaking references**\n6. 
**🔥 CONSOLIDATE ALL PLANNED FIXES** for single function call execution\n\n### 3. Precision Fix Implementation\n1. **Apply fixes ONLY to error models**\n2. **Update cross-references ONLY if needed**\n3. **Preserve all unchanged model integrity**\n4. **Maintain business logic in fixed models**\n5. **Verify minimal scope compliance**\n6. **🔥 EXECUTE ALL FIXES IN ONE FUNCTION CALL**\n\n### 4. Output Validation\n1. **Confirm all errors are addressed** in affected models\n2. **Verify no new validation issues** in fixed models\n3. **Check reference integrity** with unchanged models\n4. **Validate business logic preservation** in corrected models\n5. **Ensure minimal output scope** - no unnecessary models included\n6. **🔥 VERIFY SINGLE FUNCTION CALL COMPLETION** - no additional calls needed\n\n## Input/Output Format\n\n### Input Structure\n```typescript\n{\n success: false,\n application: AutoBePrisma.IApplication, // Full schema for reference\n errors: IError[] // Target models for fixing\n}\n```\n\n### Output Requirement\nReturn ONLY corrected models that had validation errors:\n```typescript\nconst correctedModels: AutoBePrisma.IModel[] = [\n // ONLY models mentioned in IError[] array\n // ONLY models affected by cross-reference updates\n // All other models are preserved unchanged\n];\n```\n\n## Targeted Correction Examples\n\n### Example 1: Single Model Duplicate Field Error\n**Input Error:**\n```typescript\n{\n path: "users.prisma",\n table: "users",\n column: "email",\n message: "Duplicate field \'email\' in model \'users\'"\n}\n```\n\n**Output:** Only the `users` model with the duplicate field resolved\n- **Scope:** 1 model\n- **Change:** Rename one `email` field to `email_secondary` or merge if identical\n- **Excluded:** All other models remain unchanged\n- **🔥 Function Calls:** Exactly 1 function call with the corrected users model\n\n### Example 2: Cross-Model Reference Error\n**Input Error:**\n```typescript\n{\n path: "orders.prisma",\n table: "orders",\n 
column: "user_id",\n message: "Invalid target model \'user\' for foreign key \'user_id\'"\n}\n```\n\n**Output:** Only the `orders` model with corrected reference\n- **Scope:** 1 model (orders)\n- **Change:** Update `targetModel` from "user" to "users"\n- **Excluded:** The `users` model remains unchanged (just referenced correctly)\n- **🔥 Function Calls:** Exactly 1 function call with the corrected orders model\n\n### Example 3: Model Name Duplication Across Files\n**Input Errors:**\n```typescript\n[\n {\n path: "auth/users.prisma",\n table: "users",\n column: null,\n message: "Duplicate model name \'users\'"\n },\n {\n path: "admin/users.prisma",\n table: "users",\n column: null,\n message: "Duplicate model name \'users\'"\n }\n]\n```\n\n**Output:** Both affected `users` models with one renamed\n- **Scope:** 2 models\n- **Change:** Rename one to `admin_users`, update all its references\n- **Excluded:** All other models that don\'t reference the renamed model\n- **🔥 Function Calls:** Exactly 1 function call with BOTH corrected users models\n\n## Critical Success Criteria\n\n### ✅ Must Achieve (Targeted Scope)\n- [ ] **🔥 MANDATORY FUNCTION CALL: Exactly one function call executed** - this is absolutely required\n- [ ] All validation errors resolved **for mentioned models only**\n- [ ] Original business logic preserved **in corrected models**\n- [ ] Cross-model references remain valid **through minimal updates**\n- [ ] Output contains **ONLY affected models** - no unnecessary inclusions\n- [ ] Referential integrity maintained **with unchanged models**\n- [ ] **🔴 MINIMAL SCOPE: Only error models + necessary reference updates**\n- [ ] **🔴 UNCHANGED MODELS: Preserved completely in original schema**\n- [ ] **🔥 SINGLE FUNCTION CALL: All corrections executed in exactly one function call**\n\n### 🚫 Must Avoid (Scope Violations)\n- [ ] **🔥 NO FUNCTION CALL: Responding without making any function call** - this is absolutely prohibited\n- [ ] Including models without validation 
errors in output\n- [ ] Modifying models not mentioned in error array\n- [ ] Returning entire schema when only partial fixes needed\n- [ ] Making unnecessary changes beyond error resolution\n- [ ] Breaking references to unchanged models\n- [ ] **🔴 SCOPE CREEP: Fixing models that don\'t have errors**\n- [ ] **🔴 OUTPUT BLOAT: Including unchanged models in response**\n- [ ] **🔥 MULTIPLE FUNCTION CALLS: Making more than one function call**\n- [ ] **🔥 PARALLEL CALLS: Using parallel function execution**\n- [ ] **🔥 TEXT-ONLY RESPONSES: Providing corrections without function calls**\n\n## Quality Assurance Process\n\n### Pre-Output Scope Validation\n1. **Error Coverage Check**: Every error in IError[] array addressed **in minimal scope**\n2. **Output Scope Audit**: Only affected models included in response\n3. **Reference Integrity**: Unchanged models maintain valid references\n4. **Business Logic Preservation**: Corrected models maintain original intent\n5. **Cross-Model Impact**: Necessary reference updates identified and applied\n6. **🔴 Minimal Output Verification**: No unnecessary models in response**\n7. **🔴 Unchanged Model Preservation**: Non-error models completely preserved**\n8. 
**🔥 Single Call Verification**: All fixes consolidated into one function call**\n\n### Targeted Response Validation Questions\n- Are all validation errors resolved **with minimal model changes**?\n- Does the output include **ONLY models that had errors** or needed reference updates?\n- Are **unchanged models completely preserved** in the original schema?\n- Do **cross-model references remain valid** after targeted fixes?\n- Is the **business logic maintained** in all corrected models?\n- **🔴 Is the output scope minimized** to only necessary corrections?\n- **🔴 Are non-error models excluded** from the response?\n- **🔥 Were ALL corrections executed in exactly ONE function call?**\n- **🔥 Are there NO parallel or sequential function calls?**\n\n## 🎯 CORE PRINCIPLE REMINDER\n\n**Your role is TARGETED ERROR CORRECTOR, not SCHEMA RECONSTRUCTOR**\n\n- **🔥 ALWAYS make exactly ONE function call** - this is mandatory for every response\n- Fix **ONLY the models with validation errors**\n- Preserve **ALL unchanged models** in their original state\n- Return **MINIMAL output scope** - only what was corrected\n- Maintain **referential integrity** with unchanged models\n- **Focus on precision fixes, not comprehensive rebuilds**\n- **🔥 EXECUTE ALL CORRECTIONS IN EXACTLY ONE FUNCTION CALL**\n\nRemember: Your goal is to be a surgical validation error resolver, fixing only what\'s broken while preserving the integrity of the unchanged schema components. **Minimize context usage by returning only the corrected models, not the entire schema.** **Most importantly, consolidate ALL your corrections into a single function call - never use multiple or parallel function calls under any circumstances.** **NEVER respond without making a function call - this is absolutely mandatory for all validation error correction tasks.**'
|
|
5342
5340
|
}, {
|
|
5343
5341
|
id: v4(),
|
|
5344
5342
|
created_at: (new Date).toISOString(),
|
|
@@ -5399,11 +5397,7 @@ async function step$1(ctx, application, life) {
|
|
|
5399
5397
|
}
|
|
5400
5398
|
}) ]
|
|
5401
5399
|
});
|
|
5402
|
-
agentica
|
|
5403
|
-
if (event.body.tools) {
|
|
5404
|
-
event.body.tool_choice = "required";
|
|
5405
|
-
}
|
|
5406
|
-
}));
|
|
5400
|
+
enforceToolCall(agentica);
|
|
5407
5401
|
await agentica.conversate("Resolve the compilation errors in the provided Prisma schema files.");
|
|
5408
5402
|
if (pointer.value === null) {
|
|
5409
5403
|
console.error("Unreachable error: PrismaCompilerAgent.pointer.value is null");
|
|
@@ -7123,16 +7117,18 @@ async function process$3(ctx, component) {
|
|
|
7123
7117
|
controllers: [ createApplication$3({
|
|
7124
7118
|
model: ctx.model,
|
|
7125
7119
|
build: next => {
|
|
7126
|
-
pointer.value =
|
|
7127
|
-
|
|
7120
|
+
pointer.value ?? (pointer.value = {
|
|
7121
|
+
file: {
|
|
7122
|
+
filename: component.filename,
|
|
7123
|
+
namespace: next.file.namespace,
|
|
7124
|
+
models: []
|
|
7125
|
+
}
|
|
7126
|
+
});
|
|
7127
|
+
pointer.value.file.models.push(...next.file.models);
|
|
7128
7128
|
}
|
|
7129
7129
|
}) ]
|
|
7130
7130
|
});
|
|
7131
|
-
agentica
|
|
7132
|
-
if (event.body.tools) {
|
|
7133
|
-
event.body.tool_choice = "required";
|
|
7134
|
-
}
|
|
7135
|
-
}));
|
|
7131
|
+
enforceToolCall(agentica);
|
|
7136
7132
|
await agentica.conversate("Make prisma schema file please");
|
|
7137
7133
|
if (pointer.value === null) throw new Error("Unreachable code: Prisma Schema not generated");
|
|
7138
7134
|
return pointer.value;
|
|
@@ -9056,9 +9052,7 @@ async function process$2(ctx, diagnostics, code) {
|
|
|
9056
9052
|
}
|
|
9057
9053
|
}) ]
|
|
9058
9054
|
});
|
|
9059
|
-
agentica
|
|
9060
|
-
if (event.body.tools) event.body.tool_choice = "required";
|
|
9061
|
-
}));
|
|
9055
|
+
enforceToolCall(agentica);
|
|
9062
9056
|
await agentica.conversate([ "Fix the compilation error in the provided code.", "", "## Original Code", "```typescript", code, "```", "", diagnostics.map((diagnostic => {
|
|
9063
9057
|
if (diagnostic.start === undefined || diagnostic.length === undefined) return "";
|
|
9064
9058
|
return [ "## Error Information", `- Position: Characters ${diagnostic.start} to ${diagnostic.start + diagnostic.length}`, `- Error Message: ${diagnostic.messageText}`, `- Problematic Code: \`${code.substring(diagnostic.start, diagnostic.start + diagnostic.length)}\``, "" ].join("\n");
|
|
@@ -9411,9 +9405,7 @@ async function process$1(ctx, scenario) {
|
|
|
9411
9405
|
}
|
|
9412
9406
|
}) ]
|
|
9413
9407
|
});
|
|
9414
|
-
agentica
|
|
9415
|
-
if (event.body.tools) event.body.tool_choice = "required";
|
|
9416
|
-
}));
|
|
9408
|
+
enforceToolCall(agentica);
|
|
9417
9409
|
await agentica.conversate([ "Create test code for below scenario:", "", "```json", JSON.stringify(scenario, null, 2), "```" ].join("\n"));
|
|
9418
9410
|
if (pointer.value === null) throw new Error("Failed to create test code.");
|
|
9419
9411
|
return pointer.value;
|
|
@@ -9686,7 +9678,7 @@ const collection$2 = {
|
|
|
9686
9678
|
}
|
|
9687
9679
|
};
|
|
9688
9680
|
|
|
9689
|
-
const transformTestScenarioHistories = (state,
|
|
9681
|
+
const transformTestScenarioHistories = (state, allEndpoints, files) => {
|
|
9690
9682
|
if (state.analyze === null) return [ {
|
|
9691
9683
|
id: v4(),
|
|
9692
9684
|
created_at: (new Date).toISOString(),
|
|
@@ -9742,7 +9734,7 @@ const transformTestScenarioHistories = (state, endponits, files) => {
|
|
|
9742
9734
|
id: v4(),
|
|
9743
9735
|
created_at: (new Date).toISOString(),
|
|
9744
9736
|
type: "systemMessage",
|
|
9745
|
-
text: [ `This is a description of different APIs.`, `Different APIs may have to be called to create one.`, `Check which functions have been developed.`, "```json", JSON.stringify(
|
|
9737
|
+
text: [ `This is a description of different APIs.`, `Different APIs may have to be called to create one.`, `Check which functions have been developed.`, "```json", JSON.stringify(allEndpoints, null, 2), "```" ].join("\n")
|
|
9746
9738
|
}, {
|
|
9747
9739
|
id: v4(),
|
|
9748
9740
|
created_at: (new Date).toISOString(),
|
|
@@ -9751,7 +9743,7 @@ const transformTestScenarioHistories = (state, endponits, files) => {
|
|
|
9751
9743
|
} ];
|
|
9752
9744
|
};
|
|
9753
9745
|
|
|
9754
|
-
async function orchestrateTestScenario(ctx) {
|
|
9746
|
+
async function orchestrateTestScenario(ctx, capacity = 4) {
|
|
9755
9747
|
const files = Object.entries(ctx.state().interface?.files ?? {}).filter((([filename]) => filename.startsWith("test/features/api/"))).reduce(((acc, [filename, content]) => Object.assign(acc, {
|
|
9756
9748
|
[filename]: content
|
|
9757
9749
|
})), {});
|
|
@@ -9765,11 +9757,16 @@ async function orchestrateTestScenario(ctx) {
|
|
|
9765
9757
|
requestBody: it.requestBody,
|
|
9766
9758
|
responseBody: it.responseBody
|
|
9767
9759
|
})));
|
|
9760
|
+
const matrix = divideArray({
|
|
9761
|
+
array: endpoints,
|
|
9762
|
+
capacity
|
|
9763
|
+
});
|
|
9768
9764
|
const start = new Date;
|
|
9769
9765
|
let completed = 0;
|
|
9770
|
-
const scenarios = await Promise.all(
|
|
9771
|
-
const
|
|
9772
|
-
|
|
9766
|
+
const scenarios = await Promise.all(matrix.map((async e => {
|
|
9767
|
+
const rows = await divideAndConquer(ctx, e, endpoints, files, 3, (count => {
|
|
9768
|
+
completed += count;
|
|
9769
|
+
}));
|
|
9773
9770
|
ctx.dispatch({
|
|
9774
9771
|
type: "testScenario",
|
|
9775
9772
|
scenarios: rows,
|
|
@@ -9790,7 +9787,26 @@ async function orchestrateTestScenario(ctx) {
|
|
|
9790
9787
|
};
|
|
9791
9788
|
}
|
|
9792
9789
|
|
|
9793
|
-
async function
|
|
9790
|
+
async function divideAndConquer(ctx, endpoints, allEndpoints, files, retry, progress) {
|
|
9791
|
+
const remained = new HashSet(endpoints, OpenApiEndpointComparator.hashCode, OpenApiEndpointComparator.equals);
|
|
9792
|
+
const scenarios = new HashMap(OpenApiEndpointComparator.hashCode, OpenApiEndpointComparator.equals);
|
|
9793
|
+
for (let i = 0; i < retry; ++i) {
|
|
9794
|
+
if (remained.empty() === true || scenarios.size() >= endpoints.length) break;
|
|
9795
|
+
const before = scenarios.size();
|
|
9796
|
+
const newbie = await process(ctx, Array.from(remained), allEndpoints, files);
|
|
9797
|
+
for (const item of newbie) {
|
|
9798
|
+
scenarios.set(item.endpoint, item.scenarios);
|
|
9799
|
+
remained.erase(item.endpoint);
|
|
9800
|
+
}
|
|
9801
|
+
if (scenarios.size() - before !== 0) progress(scenarios.size() - before);
|
|
9802
|
+
}
|
|
9803
|
+
return Array.from(scenarios.toJSON()).map((it => ({
|
|
9804
|
+
endpoint: it.first,
|
|
9805
|
+
scenarios: it.second
|
|
9806
|
+
})));
|
|
9807
|
+
}
|
|
9808
|
+
|
|
9809
|
+
async function process(ctx, endpoints, allEndpoints, files) {
|
|
9794
9810
|
const pointer = {
|
|
9795
9811
|
value: null
|
|
9796
9812
|
};
|
|
@@ -9806,18 +9822,17 @@ async function process(ctx, endpoint, endpoints, files) {
|
|
|
9806
9822
|
}
|
|
9807
9823
|
},
|
|
9808
9824
|
tokenUsage: ctx.usage(),
|
|
9809
|
-
histories: [ ...transformTestScenarioHistories(ctx.state(),
|
|
9825
|
+
histories: [ ...transformTestScenarioHistories(ctx.state(), allEndpoints, files) ],
|
|
9810
9826
|
controllers: [ createApplication({
|
|
9811
9827
|
model: ctx.model,
|
|
9812
9828
|
build: next => {
|
|
9813
|
-
pointer.value =
|
|
9829
|
+
pointer.value ?? (pointer.value = []);
|
|
9830
|
+
pointer.value.push(...next.scenarios);
|
|
9814
9831
|
}
|
|
9815
9832
|
}) ]
|
|
9816
9833
|
});
|
|
9817
|
-
agentica
|
|
9818
|
-
|
|
9819
|
-
}));
|
|
9820
|
-
await agentica.conversate([ "Make User Scenarios for below endpoint:", "", "```json", JSON.stringify(endpoint, null, 2), "```" ].join("\n"));
|
|
9834
|
+
enforceToolCall(agentica);
|
|
9835
|
+
await agentica.conversate([ "Make User Scenarios for below endpoints:", "", "```json", JSON.stringify(endpoints, null, 2), "```" ].join("\n"));
|
|
9821
9836
|
if (pointer.value === null) throw new Error("Failed to make scenarios.");
|
|
9822
9837
|
return pointer.value;
|
|
9823
9838
|
}
|
|
@@ -11877,6 +11892,9 @@ class AutoBeAgent {
|
|
|
11877
11892
|
created_at: history.created_at
|
|
11878
11893
|
}).catch((() => {}));
|
|
11879
11894
|
}));
|
|
11895
|
+
this.agentica_.on("request", (e => {
|
|
11896
|
+
if (e.body.parallel_tool_calls !== undefined) delete e.body.parallel_tool_calls;
|
|
11897
|
+
}));
|
|
11880
11898
|
}
|
|
11881
11899
|
clone() {
|
|
11882
11900
|
return new AutoBeAgent({
|