amd-gaia 0.14.1__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- amd_gaia-0.14.1.dist-info/METADATA +768 -0
- amd_gaia-0.14.1.dist-info/RECORD +800 -0
- amd_gaia-0.14.1.dist-info/WHEEL +5 -0
- amd_gaia-0.14.1.dist-info/entry_points.txt +5 -0
- amd_gaia-0.14.1.dist-info/licenses/LICENSE.md +21 -0
- amd_gaia-0.14.1.dist-info/top_level.txt +1 -0
- gaia/__init__.py +2 -0
- gaia/agents/__init__.py +19 -0
- gaia/agents/base/__init__.py +9 -0
- gaia/agents/base/agent.py +2072 -0
- gaia/agents/base/api_agent.py +120 -0
- gaia/agents/base/console.py +1457 -0
- gaia/agents/base/mcp_agent.py +86 -0
- gaia/agents/base/tools.py +83 -0
- gaia/agents/blender/agent.py +556 -0
- gaia/agents/blender/agent_simple.py +135 -0
- gaia/agents/blender/app.py +211 -0
- gaia/agents/blender/app_simple.py +41 -0
- gaia/agents/blender/core/__init__.py +16 -0
- gaia/agents/blender/core/materials.py +506 -0
- gaia/agents/blender/core/objects.py +316 -0
- gaia/agents/blender/core/rendering.py +225 -0
- gaia/agents/blender/core/scene.py +220 -0
- gaia/agents/blender/core/view.py +146 -0
- gaia/agents/chat/__init__.py +9 -0
- gaia/agents/chat/agent.py +975 -0
- gaia/agents/chat/app.py +1058 -0
- gaia/agents/chat/session.py +508 -0
- gaia/agents/chat/tools/__init__.py +15 -0
- gaia/agents/chat/tools/file_tools.py +96 -0
- gaia/agents/chat/tools/rag_tools.py +1729 -0
- gaia/agents/chat/tools/shell_tools.py +436 -0
- gaia/agents/code/__init__.py +7 -0
- gaia/agents/code/agent.py +547 -0
- gaia/agents/code/app.py +266 -0
- gaia/agents/code/models.py +135 -0
- gaia/agents/code/orchestration/__init__.py +24 -0
- gaia/agents/code/orchestration/checklist_executor.py +1739 -0
- gaia/agents/code/orchestration/checklist_generator.py +709 -0
- gaia/agents/code/orchestration/factories/__init__.py +9 -0
- gaia/agents/code/orchestration/factories/base.py +63 -0
- gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -0
- gaia/agents/code/orchestration/factories/python_factory.py +106 -0
- gaia/agents/code/orchestration/orchestrator.py +610 -0
- gaia/agents/code/orchestration/project_analyzer.py +391 -0
- gaia/agents/code/orchestration/steps/__init__.py +67 -0
- gaia/agents/code/orchestration/steps/base.py +188 -0
- gaia/agents/code/orchestration/steps/error_handler.py +314 -0
- gaia/agents/code/orchestration/steps/nextjs.py +828 -0
- gaia/agents/code/orchestration/steps/python.py +307 -0
- gaia/agents/code/orchestration/template_catalog.py +463 -0
- gaia/agents/code/orchestration/workflows/__init__.py +14 -0
- gaia/agents/code/orchestration/workflows/base.py +80 -0
- gaia/agents/code/orchestration/workflows/nextjs.py +186 -0
- gaia/agents/code/orchestration/workflows/python.py +94 -0
- gaia/agents/code/prompts/__init__.py +11 -0
- gaia/agents/code/prompts/base_prompt.py +77 -0
- gaia/agents/code/prompts/code_patterns.py +1925 -0
- gaia/agents/code/prompts/nextjs_prompt.py +40 -0
- gaia/agents/code/prompts/python_prompt.py +109 -0
- gaia/agents/code/schema_inference.py +365 -0
- gaia/agents/code/system_prompt.py +41 -0
- gaia/agents/code/tools/__init__.py +42 -0
- gaia/agents/code/tools/cli_tools.py +1138 -0
- gaia/agents/code/tools/code_formatting.py +319 -0
- gaia/agents/code/tools/code_tools.py +769 -0
- gaia/agents/code/tools/error_fixing.py +1347 -0
- gaia/agents/code/tools/external_tools.py +180 -0
- gaia/agents/code/tools/file_io.py +845 -0
- gaia/agents/code/tools/prisma_tools.py +190 -0
- gaia/agents/code/tools/project_management.py +1016 -0
- gaia/agents/code/tools/testing.py +321 -0
- gaia/agents/code/tools/typescript_tools.py +122 -0
- gaia/agents/code/tools/validation_parsing.py +461 -0
- gaia/agents/code/tools/validation_tools.py +803 -0
- gaia/agents/code/tools/web_dev_tools.py +1744 -0
- gaia/agents/code/validators/__init__.py +16 -0
- gaia/agents/code/validators/antipattern_checker.py +241 -0
- gaia/agents/code/validators/ast_analyzer.py +197 -0
- gaia/agents/code/validators/requirements_validator.py +145 -0
- gaia/agents/code/validators/syntax_validator.py +171 -0
- gaia/agents/docker/__init__.py +7 -0
- gaia/agents/docker/agent.py +642 -0
- gaia/agents/jira/__init__.py +11 -0
- gaia/agents/jira/agent.py +894 -0
- gaia/agents/jira/jql_templates.py +299 -0
- gaia/agents/routing/__init__.py +7 -0
- gaia/agents/routing/agent.py +512 -0
- gaia/agents/routing/system_prompt.py +75 -0
- gaia/api/__init__.py +23 -0
- gaia/api/agent_registry.py +238 -0
- gaia/api/app.py +305 -0
- gaia/api/openai_server.py +575 -0
- gaia/api/schemas.py +186 -0
- gaia/api/sse_handler.py +370 -0
- gaia/apps/__init__.py +4 -0
- gaia/apps/llm/__init__.py +6 -0
- gaia/apps/llm/app.py +169 -0
- gaia/apps/summarize/app.py +633 -0
- gaia/apps/summarize/html_viewer.py +133 -0
- gaia/apps/summarize/pdf_formatter.py +284 -0
- gaia/audio/__init__.py +2 -0
- gaia/audio/audio_client.py +439 -0
- gaia/audio/audio_recorder.py +269 -0
- gaia/audio/kokoro_tts.py +599 -0
- gaia/audio/whisper_asr.py +432 -0
- gaia/chat/__init__.py +16 -0
- gaia/chat/app.py +430 -0
- gaia/chat/prompts.py +522 -0
- gaia/chat/sdk.py +1200 -0
- gaia/cli.py +5621 -0
- gaia/eval/batch_experiment.py +2332 -0
- gaia/eval/claude.py +542 -0
- gaia/eval/config.py +37 -0
- gaia/eval/email_generator.py +512 -0
- gaia/eval/eval.py +3179 -0
- gaia/eval/groundtruth.py +1130 -0
- gaia/eval/transcript_generator.py +582 -0
- gaia/eval/webapp/README.md +168 -0
- gaia/eval/webapp/node_modules/.bin/mime +16 -0
- gaia/eval/webapp/node_modules/.bin/mime.cmd +17 -0
- gaia/eval/webapp/node_modules/.bin/mime.ps1 +28 -0
- gaia/eval/webapp/node_modules/.package-lock.json +865 -0
- gaia/eval/webapp/node_modules/accepts/HISTORY.md +243 -0
- gaia/eval/webapp/node_modules/accepts/LICENSE +23 -0
- gaia/eval/webapp/node_modules/accepts/README.md +140 -0
- gaia/eval/webapp/node_modules/accepts/index.js +238 -0
- gaia/eval/webapp/node_modules/accepts/package.json +47 -0
- gaia/eval/webapp/node_modules/array-flatten/LICENSE +21 -0
- gaia/eval/webapp/node_modules/array-flatten/README.md +43 -0
- gaia/eval/webapp/node_modules/array-flatten/array-flatten.js +64 -0
- gaia/eval/webapp/node_modules/array-flatten/package.json +39 -0
- gaia/eval/webapp/node_modules/body-parser/HISTORY.md +672 -0
- gaia/eval/webapp/node_modules/body-parser/LICENSE +23 -0
- gaia/eval/webapp/node_modules/body-parser/README.md +476 -0
- gaia/eval/webapp/node_modules/body-parser/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/body-parser/index.js +156 -0
- gaia/eval/webapp/node_modules/body-parser/lib/read.js +205 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/json.js +247 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/raw.js +101 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/text.js +121 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/urlencoded.js +307 -0
- gaia/eval/webapp/node_modules/body-parser/package.json +56 -0
- gaia/eval/webapp/node_modules/bytes/History.md +97 -0
- gaia/eval/webapp/node_modules/bytes/LICENSE +23 -0
- gaia/eval/webapp/node_modules/bytes/Readme.md +152 -0
- gaia/eval/webapp/node_modules/bytes/index.js +170 -0
- gaia/eval/webapp/node_modules/bytes/package.json +42 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.eslintrc +17 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.nycrc +9 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/CHANGELOG.md +30 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/LICENSE +21 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/README.md +62 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/actualApply.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/actualApply.js +10 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/applyBind.d.ts +19 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/applyBind.js +10 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionApply.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionApply.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionCall.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionCall.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/index.d.ts +64 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/index.js +15 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/package.json +85 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/reflectApply.d.ts +3 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/reflectApply.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/test/index.js +63 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/call-bound/.eslintrc +13 -0
- gaia/eval/webapp/node_modules/call-bound/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/call-bound/.nycrc +9 -0
- gaia/eval/webapp/node_modules/call-bound/CHANGELOG.md +42 -0
- gaia/eval/webapp/node_modules/call-bound/LICENSE +21 -0
- gaia/eval/webapp/node_modules/call-bound/README.md +53 -0
- gaia/eval/webapp/node_modules/call-bound/index.d.ts +94 -0
- gaia/eval/webapp/node_modules/call-bound/index.js +19 -0
- gaia/eval/webapp/node_modules/call-bound/package.json +99 -0
- gaia/eval/webapp/node_modules/call-bound/test/index.js +61 -0
- gaia/eval/webapp/node_modules/call-bound/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/content-disposition/HISTORY.md +60 -0
- gaia/eval/webapp/node_modules/content-disposition/LICENSE +22 -0
- gaia/eval/webapp/node_modules/content-disposition/README.md +142 -0
- gaia/eval/webapp/node_modules/content-disposition/index.js +458 -0
- gaia/eval/webapp/node_modules/content-disposition/package.json +44 -0
- gaia/eval/webapp/node_modules/content-type/HISTORY.md +29 -0
- gaia/eval/webapp/node_modules/content-type/LICENSE +22 -0
- gaia/eval/webapp/node_modules/content-type/README.md +94 -0
- gaia/eval/webapp/node_modules/content-type/index.js +225 -0
- gaia/eval/webapp/node_modules/content-type/package.json +42 -0
- gaia/eval/webapp/node_modules/cookie/LICENSE +24 -0
- gaia/eval/webapp/node_modules/cookie/README.md +317 -0
- gaia/eval/webapp/node_modules/cookie/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/cookie/index.js +334 -0
- gaia/eval/webapp/node_modules/cookie/package.json +44 -0
- gaia/eval/webapp/node_modules/cookie-signature/.npmignore +4 -0
- gaia/eval/webapp/node_modules/cookie-signature/History.md +38 -0
- gaia/eval/webapp/node_modules/cookie-signature/Readme.md +42 -0
- gaia/eval/webapp/node_modules/cookie-signature/index.js +51 -0
- gaia/eval/webapp/node_modules/cookie-signature/package.json +18 -0
- gaia/eval/webapp/node_modules/debug/.coveralls.yml +1 -0
- gaia/eval/webapp/node_modules/debug/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/debug/.npmignore +9 -0
- gaia/eval/webapp/node_modules/debug/.travis.yml +14 -0
- gaia/eval/webapp/node_modules/debug/CHANGELOG.md +362 -0
- gaia/eval/webapp/node_modules/debug/LICENSE +19 -0
- gaia/eval/webapp/node_modules/debug/Makefile +50 -0
- gaia/eval/webapp/node_modules/debug/README.md +312 -0
- gaia/eval/webapp/node_modules/debug/component.json +19 -0
- gaia/eval/webapp/node_modules/debug/karma.conf.js +70 -0
- gaia/eval/webapp/node_modules/debug/node.js +1 -0
- gaia/eval/webapp/node_modules/debug/package.json +49 -0
- gaia/eval/webapp/node_modules/debug/src/browser.js +185 -0
- gaia/eval/webapp/node_modules/debug/src/debug.js +202 -0
- gaia/eval/webapp/node_modules/debug/src/index.js +10 -0
- gaia/eval/webapp/node_modules/debug/src/inspector-log.js +15 -0
- gaia/eval/webapp/node_modules/debug/src/node.js +248 -0
- gaia/eval/webapp/node_modules/depd/History.md +103 -0
- gaia/eval/webapp/node_modules/depd/LICENSE +22 -0
- gaia/eval/webapp/node_modules/depd/Readme.md +280 -0
- gaia/eval/webapp/node_modules/depd/index.js +538 -0
- gaia/eval/webapp/node_modules/depd/lib/browser/index.js +77 -0
- gaia/eval/webapp/node_modules/depd/package.json +45 -0
- gaia/eval/webapp/node_modules/destroy/LICENSE +23 -0
- gaia/eval/webapp/node_modules/destroy/README.md +63 -0
- gaia/eval/webapp/node_modules/destroy/index.js +209 -0
- gaia/eval/webapp/node_modules/destroy/package.json +48 -0
- gaia/eval/webapp/node_modules/dunder-proto/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/dunder-proto/.nycrc +13 -0
- gaia/eval/webapp/node_modules/dunder-proto/CHANGELOG.md +24 -0
- gaia/eval/webapp/node_modules/dunder-proto/LICENSE +21 -0
- gaia/eval/webapp/node_modules/dunder-proto/README.md +54 -0
- gaia/eval/webapp/node_modules/dunder-proto/get.d.ts +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/get.js +30 -0
- gaia/eval/webapp/node_modules/dunder-proto/package.json +76 -0
- gaia/eval/webapp/node_modules/dunder-proto/set.d.ts +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/set.js +35 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/get.js +34 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/index.js +4 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/set.js +50 -0
- gaia/eval/webapp/node_modules/dunder-proto/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/ee-first/LICENSE +22 -0
- gaia/eval/webapp/node_modules/ee-first/README.md +80 -0
- gaia/eval/webapp/node_modules/ee-first/index.js +95 -0
- gaia/eval/webapp/node_modules/ee-first/package.json +29 -0
- gaia/eval/webapp/node_modules/encodeurl/LICENSE +22 -0
- gaia/eval/webapp/node_modules/encodeurl/README.md +109 -0
- gaia/eval/webapp/node_modules/encodeurl/index.js +60 -0
- gaia/eval/webapp/node_modules/encodeurl/package.json +40 -0
- gaia/eval/webapp/node_modules/es-define-property/.eslintrc +13 -0
- gaia/eval/webapp/node_modules/es-define-property/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-define-property/.nycrc +9 -0
- gaia/eval/webapp/node_modules/es-define-property/CHANGELOG.md +29 -0
- gaia/eval/webapp/node_modules/es-define-property/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-define-property/README.md +49 -0
- gaia/eval/webapp/node_modules/es-define-property/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-define-property/index.js +14 -0
- gaia/eval/webapp/node_modules/es-define-property/package.json +81 -0
- gaia/eval/webapp/node_modules/es-define-property/test/index.js +56 -0
- gaia/eval/webapp/node_modules/es-define-property/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/es-errors/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/es-errors/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-errors/CHANGELOG.md +40 -0
- gaia/eval/webapp/node_modules/es-errors/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-errors/README.md +55 -0
- gaia/eval/webapp/node_modules/es-errors/eval.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/eval.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/index.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/package.json +80 -0
- gaia/eval/webapp/node_modules/es-errors/range.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/range.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/ref.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/ref.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/syntax.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/syntax.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/test/index.js +19 -0
- gaia/eval/webapp/node_modules/es-errors/tsconfig.json +49 -0
- gaia/eval/webapp/node_modules/es-errors/type.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/type.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/uri.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/uri.js +4 -0
- gaia/eval/webapp/node_modules/es-object-atoms/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/es-object-atoms/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-object-atoms/CHANGELOG.md +37 -0
- gaia/eval/webapp/node_modules/es-object-atoms/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-object-atoms/README.md +63 -0
- gaia/eval/webapp/node_modules/es-object-atoms/RequireObjectCoercible.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/RequireObjectCoercible.js +11 -0
- gaia/eval/webapp/node_modules/es-object-atoms/ToObject.d.ts +7 -0
- gaia/eval/webapp/node_modules/es-object-atoms/ToObject.js +10 -0
- gaia/eval/webapp/node_modules/es-object-atoms/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/index.js +4 -0
- gaia/eval/webapp/node_modules/es-object-atoms/isObject.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/isObject.js +6 -0
- gaia/eval/webapp/node_modules/es-object-atoms/package.json +80 -0
- gaia/eval/webapp/node_modules/es-object-atoms/test/index.js +38 -0
- gaia/eval/webapp/node_modules/es-object-atoms/tsconfig.json +6 -0
- gaia/eval/webapp/node_modules/escape-html/LICENSE +24 -0
- gaia/eval/webapp/node_modules/escape-html/Readme.md +43 -0
- gaia/eval/webapp/node_modules/escape-html/index.js +78 -0
- gaia/eval/webapp/node_modules/escape-html/package.json +24 -0
- gaia/eval/webapp/node_modules/etag/HISTORY.md +83 -0
- gaia/eval/webapp/node_modules/etag/LICENSE +22 -0
- gaia/eval/webapp/node_modules/etag/README.md +159 -0
- gaia/eval/webapp/node_modules/etag/index.js +131 -0
- gaia/eval/webapp/node_modules/etag/package.json +47 -0
- gaia/eval/webapp/node_modules/express/History.md +3656 -0
- gaia/eval/webapp/node_modules/express/LICENSE +24 -0
- gaia/eval/webapp/node_modules/express/Readme.md +260 -0
- gaia/eval/webapp/node_modules/express/index.js +11 -0
- gaia/eval/webapp/node_modules/express/lib/application.js +661 -0
- gaia/eval/webapp/node_modules/express/lib/express.js +116 -0
- gaia/eval/webapp/node_modules/express/lib/middleware/init.js +43 -0
- gaia/eval/webapp/node_modules/express/lib/middleware/query.js +47 -0
- gaia/eval/webapp/node_modules/express/lib/request.js +525 -0
- gaia/eval/webapp/node_modules/express/lib/response.js +1179 -0
- gaia/eval/webapp/node_modules/express/lib/router/index.js +673 -0
- gaia/eval/webapp/node_modules/express/lib/router/layer.js +181 -0
- gaia/eval/webapp/node_modules/express/lib/router/route.js +230 -0
- gaia/eval/webapp/node_modules/express/lib/utils.js +303 -0
- gaia/eval/webapp/node_modules/express/lib/view.js +182 -0
- gaia/eval/webapp/node_modules/express/package.json +102 -0
- gaia/eval/webapp/node_modules/finalhandler/HISTORY.md +210 -0
- gaia/eval/webapp/node_modules/finalhandler/LICENSE +22 -0
- gaia/eval/webapp/node_modules/finalhandler/README.md +147 -0
- gaia/eval/webapp/node_modules/finalhandler/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/finalhandler/index.js +341 -0
- gaia/eval/webapp/node_modules/finalhandler/package.json +47 -0
- gaia/eval/webapp/node_modules/forwarded/HISTORY.md +21 -0
- gaia/eval/webapp/node_modules/forwarded/LICENSE +22 -0
- gaia/eval/webapp/node_modules/forwarded/README.md +57 -0
- gaia/eval/webapp/node_modules/forwarded/index.js +90 -0
- gaia/eval/webapp/node_modules/forwarded/package.json +45 -0
- gaia/eval/webapp/node_modules/fresh/HISTORY.md +70 -0
- gaia/eval/webapp/node_modules/fresh/LICENSE +23 -0
- gaia/eval/webapp/node_modules/fresh/README.md +119 -0
- gaia/eval/webapp/node_modules/fresh/index.js +137 -0
- gaia/eval/webapp/node_modules/fresh/package.json +46 -0
- gaia/eval/webapp/node_modules/fs/README.md +9 -0
- gaia/eval/webapp/node_modules/fs/package.json +20 -0
- gaia/eval/webapp/node_modules/function-bind/.eslintrc +21 -0
- gaia/eval/webapp/node_modules/function-bind/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/function-bind/.github/SECURITY.md +3 -0
- gaia/eval/webapp/node_modules/function-bind/.nycrc +13 -0
- gaia/eval/webapp/node_modules/function-bind/CHANGELOG.md +136 -0
- gaia/eval/webapp/node_modules/function-bind/LICENSE +20 -0
- gaia/eval/webapp/node_modules/function-bind/README.md +46 -0
- gaia/eval/webapp/node_modules/function-bind/implementation.js +84 -0
- gaia/eval/webapp/node_modules/function-bind/index.js +5 -0
- gaia/eval/webapp/node_modules/function-bind/package.json +87 -0
- gaia/eval/webapp/node_modules/function-bind/test/.eslintrc +9 -0
- gaia/eval/webapp/node_modules/function-bind/test/index.js +252 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.eslintrc +42 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.nycrc +9 -0
- gaia/eval/webapp/node_modules/get-intrinsic/CHANGELOG.md +186 -0
- gaia/eval/webapp/node_modules/get-intrinsic/LICENSE +21 -0
- gaia/eval/webapp/node_modules/get-intrinsic/README.md +71 -0
- gaia/eval/webapp/node_modules/get-intrinsic/index.js +378 -0
- gaia/eval/webapp/node_modules/get-intrinsic/package.json +97 -0
- gaia/eval/webapp/node_modules/get-intrinsic/test/GetIntrinsic.js +274 -0
- gaia/eval/webapp/node_modules/get-proto/.eslintrc +10 -0
- gaia/eval/webapp/node_modules/get-proto/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/get-proto/.nycrc +9 -0
- gaia/eval/webapp/node_modules/get-proto/CHANGELOG.md +21 -0
- gaia/eval/webapp/node_modules/get-proto/LICENSE +21 -0
- gaia/eval/webapp/node_modules/get-proto/Object.getPrototypeOf.d.ts +5 -0
- gaia/eval/webapp/node_modules/get-proto/Object.getPrototypeOf.js +6 -0
- gaia/eval/webapp/node_modules/get-proto/README.md +50 -0
- gaia/eval/webapp/node_modules/get-proto/Reflect.getPrototypeOf.d.ts +3 -0
- gaia/eval/webapp/node_modules/get-proto/Reflect.getPrototypeOf.js +4 -0
- gaia/eval/webapp/node_modules/get-proto/index.d.ts +5 -0
- gaia/eval/webapp/node_modules/get-proto/index.js +27 -0
- gaia/eval/webapp/node_modules/get-proto/package.json +81 -0
- gaia/eval/webapp/node_modules/get-proto/test/index.js +68 -0
- gaia/eval/webapp/node_modules/get-proto/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/gopd/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/gopd/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/gopd/CHANGELOG.md +45 -0
- gaia/eval/webapp/node_modules/gopd/LICENSE +21 -0
- gaia/eval/webapp/node_modules/gopd/README.md +40 -0
- gaia/eval/webapp/node_modules/gopd/gOPD.d.ts +1 -0
- gaia/eval/webapp/node_modules/gopd/gOPD.js +4 -0
- gaia/eval/webapp/node_modules/gopd/index.d.ts +5 -0
- gaia/eval/webapp/node_modules/gopd/index.js +15 -0
- gaia/eval/webapp/node_modules/gopd/package.json +77 -0
- gaia/eval/webapp/node_modules/gopd/test/index.js +36 -0
- gaia/eval/webapp/node_modules/gopd/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/has-symbols/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/has-symbols/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/has-symbols/.nycrc +9 -0
- gaia/eval/webapp/node_modules/has-symbols/CHANGELOG.md +91 -0
- gaia/eval/webapp/node_modules/has-symbols/LICENSE +21 -0
- gaia/eval/webapp/node_modules/has-symbols/README.md +46 -0
- gaia/eval/webapp/node_modules/has-symbols/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/has-symbols/index.js +14 -0
- gaia/eval/webapp/node_modules/has-symbols/package.json +111 -0
- gaia/eval/webapp/node_modules/has-symbols/shams.d.ts +3 -0
- gaia/eval/webapp/node_modules/has-symbols/shams.js +45 -0
- gaia/eval/webapp/node_modules/has-symbols/test/index.js +22 -0
- gaia/eval/webapp/node_modules/has-symbols/test/shams/core-js.js +29 -0
- gaia/eval/webapp/node_modules/has-symbols/test/shams/get-own-property-symbols.js +29 -0
- gaia/eval/webapp/node_modules/has-symbols/test/tests.js +58 -0
- gaia/eval/webapp/node_modules/has-symbols/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/hasown/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/hasown/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/hasown/.nycrc +13 -0
- gaia/eval/webapp/node_modules/hasown/CHANGELOG.md +40 -0
- gaia/eval/webapp/node_modules/hasown/LICENSE +21 -0
- gaia/eval/webapp/node_modules/hasown/README.md +40 -0
- gaia/eval/webapp/node_modules/hasown/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/hasown/index.js +8 -0
- gaia/eval/webapp/node_modules/hasown/package.json +92 -0
- gaia/eval/webapp/node_modules/hasown/tsconfig.json +6 -0
- gaia/eval/webapp/node_modules/http-errors/HISTORY.md +180 -0
- gaia/eval/webapp/node_modules/http-errors/LICENSE +23 -0
- gaia/eval/webapp/node_modules/http-errors/README.md +169 -0
- gaia/eval/webapp/node_modules/http-errors/index.js +289 -0
- gaia/eval/webapp/node_modules/http-errors/package.json +50 -0
- gaia/eval/webapp/node_modules/iconv-lite/Changelog.md +162 -0
- gaia/eval/webapp/node_modules/iconv-lite/LICENSE +21 -0
- gaia/eval/webapp/node_modules/iconv-lite/README.md +156 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/dbcs-codec.js +555 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/dbcs-data.js +176 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/index.js +22 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/internal.js +188 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-codec.js +72 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-data-generated.js +451 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-data.js +174 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/big5-added.json +122 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp936.json +264 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp949.json +273 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp950.json +177 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/eucjp.json +182 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/gb18030-ranges.json +1 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/gbk-added.json +55 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/shiftjis.json +125 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/utf16.js +177 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/utf7.js +290 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/bom-handling.js +52 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/extend-node.js +217 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/index.d.ts +24 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/index.js +153 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/streams.js +121 -0
- gaia/eval/webapp/node_modules/iconv-lite/package.json +46 -0
- gaia/eval/webapp/node_modules/inherits/LICENSE +16 -0
- gaia/eval/webapp/node_modules/inherits/README.md +42 -0
- gaia/eval/webapp/node_modules/inherits/inherits.js +9 -0
- gaia/eval/webapp/node_modules/inherits/inherits_browser.js +27 -0
- gaia/eval/webapp/node_modules/inherits/package.json +29 -0
- gaia/eval/webapp/node_modules/ipaddr.js/LICENSE +19 -0
- gaia/eval/webapp/node_modules/ipaddr.js/README.md +233 -0
- gaia/eval/webapp/node_modules/ipaddr.js/ipaddr.min.js +1 -0
- gaia/eval/webapp/node_modules/ipaddr.js/lib/ipaddr.js +673 -0
- gaia/eval/webapp/node_modules/ipaddr.js/lib/ipaddr.js.d.ts +68 -0
- gaia/eval/webapp/node_modules/ipaddr.js/package.json +35 -0
- gaia/eval/webapp/node_modules/math-intrinsics/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/math-intrinsics/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/math-intrinsics/CHANGELOG.md +24 -0
- gaia/eval/webapp/node_modules/math-intrinsics/LICENSE +21 -0
- gaia/eval/webapp/node_modules/math-intrinsics/README.md +50 -0
- gaia/eval/webapp/node_modules/math-intrinsics/abs.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/abs.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxArrayLength.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxArrayLength.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxSafeInteger.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxSafeInteger.js +5 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxValue.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxValue.js +5 -0
- gaia/eval/webapp/node_modules/math-intrinsics/floor.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/floor.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isFinite.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isFinite.js +12 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isInteger.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isInteger.js +16 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNaN.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNaN.js +6 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNegativeZero.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNegativeZero.js +6 -0
- gaia/eval/webapp/node_modules/math-intrinsics/max.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/max.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/min.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/min.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/mod.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/mod.js +9 -0
- gaia/eval/webapp/node_modules/math-intrinsics/package.json +86 -0
- gaia/eval/webapp/node_modules/math-intrinsics/pow.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/pow.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/round.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/round.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/sign.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/sign.js +11 -0
- gaia/eval/webapp/node_modules/math-intrinsics/test/index.js +192 -0
- gaia/eval/webapp/node_modules/math-intrinsics/tsconfig.json +3 -0
- gaia/eval/webapp/node_modules/media-typer/HISTORY.md +22 -0
- gaia/eval/webapp/node_modules/media-typer/LICENSE +22 -0
- gaia/eval/webapp/node_modules/media-typer/README.md +81 -0
- gaia/eval/webapp/node_modules/media-typer/index.js +270 -0
- gaia/eval/webapp/node_modules/media-typer/package.json +26 -0
- gaia/eval/webapp/node_modules/merge-descriptors/HISTORY.md +21 -0
- gaia/eval/webapp/node_modules/merge-descriptors/LICENSE +23 -0
- gaia/eval/webapp/node_modules/merge-descriptors/README.md +49 -0
- gaia/eval/webapp/node_modules/merge-descriptors/index.js +60 -0
- gaia/eval/webapp/node_modules/merge-descriptors/package.json +39 -0
- gaia/eval/webapp/node_modules/methods/HISTORY.md +29 -0
- gaia/eval/webapp/node_modules/methods/LICENSE +24 -0
- gaia/eval/webapp/node_modules/methods/README.md +51 -0
- gaia/eval/webapp/node_modules/methods/index.js +69 -0
- gaia/eval/webapp/node_modules/methods/package.json +36 -0
- gaia/eval/webapp/node_modules/mime/.npmignore +0 -0
- gaia/eval/webapp/node_modules/mime/CHANGELOG.md +164 -0
- gaia/eval/webapp/node_modules/mime/LICENSE +21 -0
- gaia/eval/webapp/node_modules/mime/README.md +90 -0
- gaia/eval/webapp/node_modules/mime/cli.js +8 -0
- gaia/eval/webapp/node_modules/mime/mime.js +108 -0
- gaia/eval/webapp/node_modules/mime/package.json +44 -0
- gaia/eval/webapp/node_modules/mime/src/build.js +53 -0
- gaia/eval/webapp/node_modules/mime/src/test.js +60 -0
- gaia/eval/webapp/node_modules/mime/types.json +1 -0
- gaia/eval/webapp/node_modules/mime-db/HISTORY.md +507 -0
- gaia/eval/webapp/node_modules/mime-db/LICENSE +23 -0
- gaia/eval/webapp/node_modules/mime-db/README.md +100 -0
- gaia/eval/webapp/node_modules/mime-db/db.json +8519 -0
- gaia/eval/webapp/node_modules/mime-db/index.js +12 -0
- gaia/eval/webapp/node_modules/mime-db/package.json +60 -0
- gaia/eval/webapp/node_modules/mime-types/HISTORY.md +397 -0
- gaia/eval/webapp/node_modules/mime-types/LICENSE +23 -0
- gaia/eval/webapp/node_modules/mime-types/README.md +113 -0
- gaia/eval/webapp/node_modules/mime-types/index.js +188 -0
- gaia/eval/webapp/node_modules/mime-types/package.json +44 -0
- gaia/eval/webapp/node_modules/ms/index.js +152 -0
- gaia/eval/webapp/node_modules/ms/license.md +21 -0
- gaia/eval/webapp/node_modules/ms/package.json +37 -0
- gaia/eval/webapp/node_modules/ms/readme.md +51 -0
- gaia/eval/webapp/node_modules/negotiator/HISTORY.md +108 -0
- gaia/eval/webapp/node_modules/negotiator/LICENSE +24 -0
- gaia/eval/webapp/node_modules/negotiator/README.md +203 -0
- gaia/eval/webapp/node_modules/negotiator/index.js +82 -0
- gaia/eval/webapp/node_modules/negotiator/lib/charset.js +169 -0
- gaia/eval/webapp/node_modules/negotiator/lib/encoding.js +184 -0
- gaia/eval/webapp/node_modules/negotiator/lib/language.js +179 -0
- gaia/eval/webapp/node_modules/negotiator/lib/mediaType.js +294 -0
- gaia/eval/webapp/node_modules/negotiator/package.json +42 -0
- gaia/eval/webapp/node_modules/object-inspect/.eslintrc +53 -0
- gaia/eval/webapp/node_modules/object-inspect/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/object-inspect/.nycrc +13 -0
- gaia/eval/webapp/node_modules/object-inspect/CHANGELOG.md +424 -0
- gaia/eval/webapp/node_modules/object-inspect/LICENSE +21 -0
- gaia/eval/webapp/node_modules/object-inspect/example/all.js +23 -0
- gaia/eval/webapp/node_modules/object-inspect/example/circular.js +6 -0
- gaia/eval/webapp/node_modules/object-inspect/example/fn.js +5 -0
- gaia/eval/webapp/node_modules/object-inspect/example/inspect.js +10 -0
- gaia/eval/webapp/node_modules/object-inspect/index.js +544 -0
- gaia/eval/webapp/node_modules/object-inspect/package-support.json +20 -0
- gaia/eval/webapp/node_modules/object-inspect/package.json +105 -0
- gaia/eval/webapp/node_modules/object-inspect/readme.markdown +84 -0
- gaia/eval/webapp/node_modules/object-inspect/test/bigint.js +58 -0
- gaia/eval/webapp/node_modules/object-inspect/test/browser/dom.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/circular.js +16 -0
- gaia/eval/webapp/node_modules/object-inspect/test/deep.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/element.js +53 -0
- gaia/eval/webapp/node_modules/object-inspect/test/err.js +48 -0
- gaia/eval/webapp/node_modules/object-inspect/test/fakes.js +29 -0
- gaia/eval/webapp/node_modules/object-inspect/test/fn.js +76 -0
- gaia/eval/webapp/node_modules/object-inspect/test/global.js +17 -0
- gaia/eval/webapp/node_modules/object-inspect/test/has.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/holes.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/indent-option.js +271 -0
- gaia/eval/webapp/node_modules/object-inspect/test/inspect.js +139 -0
- gaia/eval/webapp/node_modules/object-inspect/test/lowbyte.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/number.js +58 -0
- gaia/eval/webapp/node_modules/object-inspect/test/quoteStyle.js +26 -0
- gaia/eval/webapp/node_modules/object-inspect/test/toStringTag.js +40 -0
- gaia/eval/webapp/node_modules/object-inspect/test/undef.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/values.js +261 -0
- gaia/eval/webapp/node_modules/object-inspect/test-core-js.js +26 -0
- gaia/eval/webapp/node_modules/object-inspect/util.inspect.js +1 -0
- gaia/eval/webapp/node_modules/on-finished/HISTORY.md +98 -0
- gaia/eval/webapp/node_modules/on-finished/LICENSE +23 -0
- gaia/eval/webapp/node_modules/on-finished/README.md +162 -0
- gaia/eval/webapp/node_modules/on-finished/index.js +234 -0
- gaia/eval/webapp/node_modules/on-finished/package.json +39 -0
- gaia/eval/webapp/node_modules/parseurl/HISTORY.md +58 -0
- gaia/eval/webapp/node_modules/parseurl/LICENSE +24 -0
- gaia/eval/webapp/node_modules/parseurl/README.md +133 -0
- gaia/eval/webapp/node_modules/parseurl/index.js +158 -0
- gaia/eval/webapp/node_modules/parseurl/package.json +40 -0
- gaia/eval/webapp/node_modules/path/.npmignore +1 -0
- gaia/eval/webapp/node_modules/path/LICENSE +18 -0
- gaia/eval/webapp/node_modules/path/README.md +15 -0
- gaia/eval/webapp/node_modules/path/package.json +24 -0
- gaia/eval/webapp/node_modules/path/path.js +628 -0
- gaia/eval/webapp/node_modules/path-to-regexp/LICENSE +21 -0
- gaia/eval/webapp/node_modules/path-to-regexp/Readme.md +35 -0
- gaia/eval/webapp/node_modules/path-to-regexp/index.js +156 -0
- gaia/eval/webapp/node_modules/path-to-regexp/package.json +30 -0
- gaia/eval/webapp/node_modules/process/.eslintrc +21 -0
- gaia/eval/webapp/node_modules/process/LICENSE +22 -0
- gaia/eval/webapp/node_modules/process/README.md +26 -0
- gaia/eval/webapp/node_modules/process/browser.js +184 -0
- gaia/eval/webapp/node_modules/process/index.js +2 -0
- gaia/eval/webapp/node_modules/process/package.json +27 -0
- gaia/eval/webapp/node_modules/process/test.js +199 -0
- gaia/eval/webapp/node_modules/proxy-addr/HISTORY.md +161 -0
- gaia/eval/webapp/node_modules/proxy-addr/LICENSE +22 -0
- gaia/eval/webapp/node_modules/proxy-addr/README.md +139 -0
- gaia/eval/webapp/node_modules/proxy-addr/index.js +327 -0
- gaia/eval/webapp/node_modules/proxy-addr/package.json +47 -0
- gaia/eval/webapp/node_modules/qs/.editorconfig +46 -0
- gaia/eval/webapp/node_modules/qs/.eslintrc +38 -0
- gaia/eval/webapp/node_modules/qs/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/qs/.nycrc +13 -0
- gaia/eval/webapp/node_modules/qs/CHANGELOG.md +600 -0
- gaia/eval/webapp/node_modules/qs/LICENSE.md +29 -0
- gaia/eval/webapp/node_modules/qs/README.md +709 -0
- gaia/eval/webapp/node_modules/qs/dist/qs.js +90 -0
- gaia/eval/webapp/node_modules/qs/lib/formats.js +23 -0
- gaia/eval/webapp/node_modules/qs/lib/index.js +11 -0
- gaia/eval/webapp/node_modules/qs/lib/parse.js +296 -0
- gaia/eval/webapp/node_modules/qs/lib/stringify.js +351 -0
- gaia/eval/webapp/node_modules/qs/lib/utils.js +265 -0
- gaia/eval/webapp/node_modules/qs/package.json +91 -0
- gaia/eval/webapp/node_modules/qs/test/empty-keys-cases.js +267 -0
- gaia/eval/webapp/node_modules/qs/test/parse.js +1170 -0
- gaia/eval/webapp/node_modules/qs/test/stringify.js +1298 -0
- gaia/eval/webapp/node_modules/qs/test/utils.js +136 -0
- gaia/eval/webapp/node_modules/range-parser/HISTORY.md +56 -0
- gaia/eval/webapp/node_modules/range-parser/LICENSE +23 -0
- gaia/eval/webapp/node_modules/range-parser/README.md +84 -0
- gaia/eval/webapp/node_modules/range-parser/index.js +162 -0
- gaia/eval/webapp/node_modules/range-parser/package.json +44 -0
- gaia/eval/webapp/node_modules/raw-body/HISTORY.md +308 -0
- gaia/eval/webapp/node_modules/raw-body/LICENSE +22 -0
- gaia/eval/webapp/node_modules/raw-body/README.md +223 -0
- gaia/eval/webapp/node_modules/raw-body/SECURITY.md +24 -0
- gaia/eval/webapp/node_modules/raw-body/index.d.ts +87 -0
- gaia/eval/webapp/node_modules/raw-body/index.js +336 -0
- gaia/eval/webapp/node_modules/raw-body/package.json +49 -0
- gaia/eval/webapp/node_modules/safe-buffer/LICENSE +21 -0
- gaia/eval/webapp/node_modules/safe-buffer/README.md +584 -0
- gaia/eval/webapp/node_modules/safe-buffer/index.d.ts +187 -0
- gaia/eval/webapp/node_modules/safe-buffer/index.js +65 -0
- gaia/eval/webapp/node_modules/safe-buffer/package.json +51 -0
- gaia/eval/webapp/node_modules/safer-buffer/LICENSE +21 -0
- gaia/eval/webapp/node_modules/safer-buffer/Porting-Buffer.md +268 -0
- gaia/eval/webapp/node_modules/safer-buffer/Readme.md +156 -0
- gaia/eval/webapp/node_modules/safer-buffer/dangerous.js +58 -0
- gaia/eval/webapp/node_modules/safer-buffer/package.json +34 -0
- gaia/eval/webapp/node_modules/safer-buffer/safer.js +77 -0
- gaia/eval/webapp/node_modules/safer-buffer/tests.js +406 -0
- gaia/eval/webapp/node_modules/send/HISTORY.md +526 -0
- gaia/eval/webapp/node_modules/send/LICENSE +23 -0
- gaia/eval/webapp/node_modules/send/README.md +327 -0
- gaia/eval/webapp/node_modules/send/SECURITY.md +24 -0
- gaia/eval/webapp/node_modules/send/index.js +1142 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/HISTORY.md +14 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/LICENSE +22 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/README.md +128 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/index.js +60 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/package.json +40 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/index.js +162 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/license.md +21 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/package.json +38 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/readme.md +59 -0
- gaia/eval/webapp/node_modules/send/package.json +62 -0
- gaia/eval/webapp/node_modules/serve-static/HISTORY.md +487 -0
- gaia/eval/webapp/node_modules/serve-static/LICENSE +25 -0
- gaia/eval/webapp/node_modules/serve-static/README.md +257 -0
- gaia/eval/webapp/node_modules/serve-static/index.js +209 -0
- gaia/eval/webapp/node_modules/serve-static/package.json +42 -0
- gaia/eval/webapp/node_modules/setprototypeof/LICENSE +13 -0
- gaia/eval/webapp/node_modules/setprototypeof/README.md +31 -0
- gaia/eval/webapp/node_modules/setprototypeof/index.d.ts +2 -0
- gaia/eval/webapp/node_modules/setprototypeof/index.js +17 -0
- gaia/eval/webapp/node_modules/setprototypeof/package.json +38 -0
- gaia/eval/webapp/node_modules/setprototypeof/test/index.js +24 -0
- gaia/eval/webapp/node_modules/side-channel/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel/.eslintrc +12 -0
- gaia/eval/webapp/node_modules/side-channel/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel/CHANGELOG.md +110 -0
- gaia/eval/webapp/node_modules/side-channel/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel/README.md +61 -0
- gaia/eval/webapp/node_modules/side-channel/index.d.ts +14 -0
- gaia/eval/webapp/node_modules/side-channel/index.js +43 -0
- gaia/eval/webapp/node_modules/side-channel/package.json +85 -0
- gaia/eval/webapp/node_modules/side-channel/test/index.js +104 -0
- gaia/eval/webapp/node_modules/side-channel/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-list/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-list/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/side-channel-list/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-list/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-list/CHANGELOG.md +15 -0
- gaia/eval/webapp/node_modules/side-channel-list/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-list/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-list/index.d.ts +13 -0
- gaia/eval/webapp/node_modules/side-channel-list/index.js +113 -0
- gaia/eval/webapp/node_modules/side-channel-list/list.d.ts +14 -0
- gaia/eval/webapp/node_modules/side-channel-list/package.json +77 -0
- gaia/eval/webapp/node_modules/side-channel-list/test/index.js +104 -0
- gaia/eval/webapp/node_modules/side-channel-list/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-map/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-map/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/side-channel-map/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-map/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-map/CHANGELOG.md +22 -0
- gaia/eval/webapp/node_modules/side-channel-map/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-map/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-map/index.d.ts +15 -0
- gaia/eval/webapp/node_modules/side-channel-map/index.js +68 -0
- gaia/eval/webapp/node_modules/side-channel-map/package.json +80 -0
- gaia/eval/webapp/node_modules/side-channel-map/test/index.js +114 -0
- gaia/eval/webapp/node_modules/side-channel-map/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.eslintrc +12 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/CHANGELOG.md +28 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/index.d.ts +15 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/index.js +84 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/package.json +87 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/test/index.js +114 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/statuses/HISTORY.md +82 -0
- gaia/eval/webapp/node_modules/statuses/LICENSE +23 -0
- gaia/eval/webapp/node_modules/statuses/README.md +136 -0
- gaia/eval/webapp/node_modules/statuses/codes.json +65 -0
- gaia/eval/webapp/node_modules/statuses/index.js +146 -0
- gaia/eval/webapp/node_modules/statuses/package.json +49 -0
- gaia/eval/webapp/node_modules/toidentifier/HISTORY.md +9 -0
- gaia/eval/webapp/node_modules/toidentifier/LICENSE +21 -0
- gaia/eval/webapp/node_modules/toidentifier/README.md +61 -0
- gaia/eval/webapp/node_modules/toidentifier/index.js +32 -0
- gaia/eval/webapp/node_modules/toidentifier/package.json +38 -0
- gaia/eval/webapp/node_modules/type-is/HISTORY.md +259 -0
- gaia/eval/webapp/node_modules/type-is/LICENSE +23 -0
- gaia/eval/webapp/node_modules/type-is/README.md +170 -0
- gaia/eval/webapp/node_modules/type-is/index.js +266 -0
- gaia/eval/webapp/node_modules/type-is/package.json +45 -0
- gaia/eval/webapp/node_modules/unpipe/HISTORY.md +4 -0
- gaia/eval/webapp/node_modules/unpipe/LICENSE +22 -0
- gaia/eval/webapp/node_modules/unpipe/README.md +43 -0
- gaia/eval/webapp/node_modules/unpipe/index.js +69 -0
- gaia/eval/webapp/node_modules/unpipe/package.json +27 -0
- gaia/eval/webapp/node_modules/util/LICENSE +18 -0
- gaia/eval/webapp/node_modules/util/README.md +15 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/LICENSE +16 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/README.md +42 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/inherits.js +7 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/inherits_browser.js +23 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/package.json +29 -0
- gaia/eval/webapp/node_modules/util/package.json +35 -0
- gaia/eval/webapp/node_modules/util/support/isBuffer.js +3 -0
- gaia/eval/webapp/node_modules/util/support/isBufferBrowser.js +6 -0
- gaia/eval/webapp/node_modules/util/util.js +586 -0
- gaia/eval/webapp/node_modules/utils-merge/.npmignore +9 -0
- gaia/eval/webapp/node_modules/utils-merge/LICENSE +20 -0
- gaia/eval/webapp/node_modules/utils-merge/README.md +34 -0
- gaia/eval/webapp/node_modules/utils-merge/index.js +23 -0
- gaia/eval/webapp/node_modules/utils-merge/package.json +40 -0
- gaia/eval/webapp/node_modules/vary/HISTORY.md +39 -0
- gaia/eval/webapp/node_modules/vary/LICENSE +22 -0
- gaia/eval/webapp/node_modules/vary/README.md +101 -0
- gaia/eval/webapp/node_modules/vary/index.js +149 -0
- gaia/eval/webapp/node_modules/vary/package.json +43 -0
- gaia/eval/webapp/package-lock.json +875 -0
- gaia/eval/webapp/package.json +21 -0
- gaia/eval/webapp/public/app.js +3403 -0
- gaia/eval/webapp/public/index.html +88 -0
- gaia/eval/webapp/public/styles.css +3661 -0
- gaia/eval/webapp/server.js +416 -0
- gaia/eval/webapp/test-setup.js +73 -0
- gaia/llm/__init__.py +2 -0
- gaia/llm/lemonade_client.py +3083 -0
- gaia/llm/lemonade_manager.py +269 -0
- gaia/llm/llm_client.py +729 -0
- gaia/llm/vlm_client.py +307 -0
- gaia/logger.py +189 -0
- gaia/mcp/agent_mcp_server.py +245 -0
- gaia/mcp/blender_mcp_client.py +138 -0
- gaia/mcp/blender_mcp_server.py +648 -0
- gaia/mcp/context7_cache.py +332 -0
- gaia/mcp/external_services.py +518 -0
- gaia/mcp/mcp_bridge.py +550 -0
- gaia/mcp/servers/__init__.py +6 -0
- gaia/mcp/servers/docker_mcp.py +83 -0
- gaia/rag/__init__.py +10 -0
- gaia/rag/app.py +293 -0
- gaia/rag/demo.py +304 -0
- gaia/rag/pdf_utils.py +235 -0
- gaia/rag/sdk.py +2194 -0
- gaia/security.py +163 -0
- gaia/talk/app.py +289 -0
- gaia/talk/sdk.py +538 -0
- gaia/util.py +46 -0
- gaia/version.py +100 -0
|
@@ -0,0 +1,2072 @@
|
|
|
1
|
+
# Copyright(C) 2024-2025 Advanced Micro Devices, Inc. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
"""
|
|
4
|
+
Generic Agent class for building domain-specific agents.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
# Standard library imports
|
|
8
|
+
import abc
|
|
9
|
+
import datetime
|
|
10
|
+
import inspect
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import os
|
|
14
|
+
import re
|
|
15
|
+
import subprocess
|
|
16
|
+
import uuid
|
|
17
|
+
from typing import Any, Dict, List, Optional
|
|
18
|
+
|
|
19
|
+
from gaia.agents.base.console import AgentConsole, SilentConsole
|
|
20
|
+
from gaia.agents.base.tools import _TOOL_REGISTRY
|
|
21
|
+
|
|
22
|
+
# First-party imports
|
|
23
|
+
from gaia.chat.sdk import ChatConfig, ChatSDK
|
|
24
|
+
|
|
25
|
+
# Set up logging
|
|
26
|
+
logging.basicConfig(level=logging.INFO)
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
# Content truncation thresholds
|
|
30
|
+
CHUNK_TRUNCATION_THRESHOLD = 5000
|
|
31
|
+
CHUNK_TRUNCATION_SIZE = 2500
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class Agent(abc.ABC):
|
|
35
|
+
"""
|
|
36
|
+
Base Agent class that provides core functionality for domain-specific agents.
|
|
37
|
+
|
|
38
|
+
The Agent class handles the core conversation loop, tool execution, and LLM
|
|
39
|
+
interaction patterns. It provides:
|
|
40
|
+
- Conversation management with an LLM
|
|
41
|
+
- Tool registration and execution framework
|
|
42
|
+
- JSON response parsing and validation
|
|
43
|
+
- Error handling and recovery
|
|
44
|
+
- State management for multi-step plans
|
|
45
|
+
- Output formatting and file writing
|
|
46
|
+
- Configurable prompt display for debugging
|
|
47
|
+
|
|
48
|
+
Key Parameters:
|
|
49
|
+
debug: Enable general debug output and logging
|
|
50
|
+
show_prompts: Display prompts sent to LLM (useful for debugging prompts)
|
|
51
|
+
debug_prompts: Include prompts in conversation history for analysis
|
|
52
|
+
streaming: Enable real-time streaming of LLM responses
|
|
53
|
+
silent_mode: Suppress all console output for JSON-only usage
|
|
54
|
+
"""
|
|
55
|
+
|
|
56
|
+
# Define state constants
|
|
57
|
+
STATE_PLANNING = "PLANNING"
|
|
58
|
+
STATE_EXECUTING_PLAN = "EXECUTING_PLAN"
|
|
59
|
+
STATE_DIRECT_EXECUTION = "DIRECT_EXECUTION"
|
|
60
|
+
STATE_ERROR_RECOVERY = "ERROR_RECOVERY"
|
|
61
|
+
STATE_COMPLETION = "COMPLETION"
|
|
62
|
+
|
|
63
|
+
# Define tools that can execute directly without requiring a plan
|
|
64
|
+
# Subclasses can override this to specify domain-specific simple tools
|
|
65
|
+
SIMPLE_TOOLS = []
|
|
66
|
+
|
|
67
|
+
    def __init__(
        self,
        use_claude: bool = False,
        use_chatgpt: bool = False,
        claude_model: str = "claude-sonnet-4-20250514",
        base_url: Optional[str] = None,
        model_id: Optional[str] = None,
        max_steps: int = 5,
        debug_prompts: bool = False,
        show_prompts: bool = False,
        output_dir: Optional[str] = None,
        streaming: bool = False,
        show_stats: bool = False,
        silent_mode: bool = False,
        debug: bool = False,
        output_handler=None,
        max_plan_iterations: int = 3,
        min_context_size: int = 32768,
        skip_lemonade: bool = False,
    ):
        """
        Initialize the Agent with LLM client.

        Args:
            use_claude: If True, uses Claude API (default: False)
            use_chatgpt: If True, uses ChatGPT/OpenAI API (default: False)
            claude_model: Claude model to use when use_claude=True (default: "claude-sonnet-4-20250514")
            base_url: Base URL for local LLM server (default: reads from LEMONADE_BASE_URL env var, falls back to http://localhost:8000/api/v1)
            model_id: The ID of the model to use with LLM server (default for local)
            max_steps: Maximum number of steps the agent can take before terminating
            debug_prompts: If True, includes prompts in the conversation history
            show_prompts: If True, displays prompts sent to LLM in console (default: False)
            output_dir: Directory for storing JSON output files (default: current directory)
            streaming: If True, enables real-time streaming of LLM responses (default: False)
            show_stats: If True, displays LLM performance stats after each response (default: False)
            silent_mode: If True, suppresses all console output for JSON-only usage (default: False)
            debug: If True, enables debug output for troubleshooting (default: False)
            output_handler: Custom OutputHandler for displaying agent output (default: None, creates console based on silent_mode)
            max_plan_iterations: Maximum number of plan-execute-replan cycles (default: 3, 0 = unlimited)
            min_context_size: Minimum context size required for this agent (default: 32768).
            skip_lemonade: If True, skip Lemonade server initialization (default: False).
                Use this when connecting to a different OpenAI-compatible backend.

        Note: Uses local LLM server by default unless use_claude or use_chatgpt is True.
        """
        self.error_history = []  # Store error history for learning
        self.conversation_history = (
            []
        )  # Store conversation history for session persistence
        self.max_steps = max_steps
        self.debug_prompts = debug_prompts
        self.show_prompts = show_prompts  # Separate flag for displaying prompts
        self.output_dir = output_dir if output_dir else os.getcwd()
        self.streaming = streaming
        self.show_stats = show_stats
        self.silent_mode = silent_mode
        self.debug = debug
        self.last_result = None  # Store the most recent result
        self.max_plan_iterations = max_plan_iterations

        # Read base_url from environment if not provided
        if base_url is None:
            base_url = os.getenv("LEMONADE_BASE_URL", "http://localhost:8000/api/v1")

        # Lazy Lemonade initialization for local LLM users
        # This ensures Lemonade server is running before we try to use it
        # NOTE(review): ensure_ready appears to start/validate the local server
        # as a side effect before any LLM call is made — confirm against
        # LemonadeManager. Import is deferred so Claude/ChatGPT users never
        # pay for (or fail on) the Lemonade dependency.
        if not (use_claude or use_chatgpt or skip_lemonade):
            from gaia.llm.lemonade_manager import LemonadeManager

            LemonadeManager.ensure_ready(
                min_context_size=min_context_size,
                quiet=silent_mode,
                base_url=base_url,
            )

        # Initialize state management
        self.execution_state = self.STATE_PLANNING
        self.current_plan = None
        self.current_step = 0
        self.total_plan_steps = 0
        self.plan_iterations = 0  # Track number of plan cycles

        # Initialize the console/output handler for display
        # If output_handler is provided, use it; otherwise create based on silent_mode
        if output_handler is not None:
            self.console = output_handler
        else:
            self.console = self._create_console()

        # Initialize LLM client for local model
        # Order matters below: the system prompt must exist before tools are
        # appended, and _register_tools() must run before
        # _format_tools_for_prompt() so the registry is populated.
        self.system_prompt = self._get_system_prompt()

        # Register tools for this agent
        self._register_tools()

        # Update system prompt with available tools
        tools_description = self._format_tools_for_prompt()
        self.system_prompt += f"\n\n==== AVAILABLE TOOLS ====\n{tools_description}\n\n"

        # Initialize ChatSDK with proper configuration
        # Note: We don't set system_prompt in config, we pass it per request
        # Note: Context size is configured when starting Lemonade server, not here
        # Use Qwen3-Coder-30B by default for better reasoning and JSON formatting
        # The 0.5B model is too small for complex agent tasks
        chat_config = ChatConfig(
            model=model_id or "Qwen3-Coder-30B-A3B-Instruct-GGUF",
            use_claude=use_claude,
            use_chatgpt=use_chatgpt,
            claude_model=claude_model,
            base_url=base_url,
            show_stats=True,  # Always collect stats for token tracking
            max_history_length=20,  # Keep more history for agent conversations
            max_tokens=4096,  # Increased for complex code generation
        )
        self.chat = ChatSDK(chat_config)
        self.model_id = model_id

        # Print system prompt if show_prompts is enabled
        # Debug: Check the actual value of show_prompts
        if self.debug:
            logger.debug(
                f"show_prompts={self.show_prompts}, debug={self.debug}, will show prompt: {self.show_prompts}"
            )

        if self.show_prompts:
            self.console.print_prompt(self.system_prompt, "Initial System Prompt")
@abc.abstractmethod
|
|
195
|
+
def _get_system_prompt(self) -> str:
|
|
196
|
+
"""
|
|
197
|
+
Generate the system prompt for the agent.
|
|
198
|
+
Subclasses must implement this to provide domain-specific prompts.
|
|
199
|
+
"""
|
|
200
|
+
raise NotImplementedError("Subclasses must implement _get_system_prompt")
|
|
201
|
+
|
|
202
|
+
@abc.abstractmethod
|
|
203
|
+
def _create_console(self):
|
|
204
|
+
"""
|
|
205
|
+
Create and return a console output handler.
|
|
206
|
+
Returns SilentConsole if in silent_mode, otherwise AgentConsole.
|
|
207
|
+
Subclasses should override this to provide domain-specific console output.
|
|
208
|
+
"""
|
|
209
|
+
if self.silent_mode:
|
|
210
|
+
# Check if we should completely silence everything (including final answer)
|
|
211
|
+
# This would be true for JSON-only output or when output_dir is set
|
|
212
|
+
silence_final_answer = getattr(self, "output_dir", None) is not None
|
|
213
|
+
return SilentConsole(silence_final_answer=silence_final_answer)
|
|
214
|
+
return AgentConsole()
|
|
215
|
+
|
|
216
|
+
@abc.abstractmethod
|
|
217
|
+
def _register_tools(self):
|
|
218
|
+
"""
|
|
219
|
+
Register all domain-specific tools for the agent.
|
|
220
|
+
Subclasses must implement this method.
|
|
221
|
+
"""
|
|
222
|
+
raise NotImplementedError("Subclasses must implement _register_tools")
|
|
223
|
+
|
|
224
|
+
def _format_tools_for_prompt(self) -> str:
|
|
225
|
+
"""Format the registered tools into a string for the prompt."""
|
|
226
|
+
tool_descriptions = []
|
|
227
|
+
|
|
228
|
+
for name, tool_info in _TOOL_REGISTRY.items():
|
|
229
|
+
params_str = ", ".join(
|
|
230
|
+
[
|
|
231
|
+
f"{param_name}{'' if param_info['required'] else '?'}: {param_info['type']}"
|
|
232
|
+
for param_name, param_info in tool_info["parameters"].items()
|
|
233
|
+
]
|
|
234
|
+
)
|
|
235
|
+
|
|
236
|
+
description = tool_info["description"].strip()
|
|
237
|
+
tool_descriptions.append(f"- {name}({params_str}): {description}")
|
|
238
|
+
|
|
239
|
+
return "\n".join(tool_descriptions)
|
|
240
|
+
|
|
241
|
+
def list_tools(self, verbose: bool = True) -> None:
|
|
242
|
+
"""
|
|
243
|
+
Display all tools registered for this agent with their parameters and descriptions.
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
verbose: If True, displays full descriptions and parameter details. If False, shows a compact list.
|
|
247
|
+
"""
|
|
248
|
+
self.console.print_header(f"🛠️ Registered Tools for {self.__class__.__name__}")
|
|
249
|
+
self.console.print_separator()
|
|
250
|
+
|
|
251
|
+
for name, tool_info in _TOOL_REGISTRY.items():
|
|
252
|
+
# Format parameters
|
|
253
|
+
params = []
|
|
254
|
+
for param_name, param_info in tool_info["parameters"].items():
|
|
255
|
+
required = param_info.get("required", False)
|
|
256
|
+
param_type = param_info.get("type", "Any")
|
|
257
|
+
default = param_info.get("default", None)
|
|
258
|
+
|
|
259
|
+
if required:
|
|
260
|
+
params.append(f"{param_name}: {param_type}")
|
|
261
|
+
else:
|
|
262
|
+
default_str = f"={default}" if default is not None else "=None"
|
|
263
|
+
params.append(f"{param_name}: {param_type}{default_str}")
|
|
264
|
+
|
|
265
|
+
params_str = ", ".join(params)
|
|
266
|
+
|
|
267
|
+
# Get description
|
|
268
|
+
if verbose:
|
|
269
|
+
description = tool_info["description"]
|
|
270
|
+
else:
|
|
271
|
+
description = (
|
|
272
|
+
tool_info["description"].split("\n")[0]
|
|
273
|
+
if tool_info["description"]
|
|
274
|
+
else "No description"
|
|
275
|
+
)
|
|
276
|
+
|
|
277
|
+
# Print tool information
|
|
278
|
+
self.console.print_tool_info(name, params_str, description)
|
|
279
|
+
|
|
280
|
+
self.console.print_separator()
|
|
281
|
+
|
|
282
|
+
return None
|
|
283
|
+
|
|
284
|
+
def _extract_json_from_response(self, response: str) -> Optional[Dict[str, Any]]:
|
|
285
|
+
"""
|
|
286
|
+
Apply multiple extraction strategies to find valid JSON in the response.
|
|
287
|
+
|
|
288
|
+
Args:
|
|
289
|
+
response: The raw response from the LLM
|
|
290
|
+
|
|
291
|
+
Returns:
|
|
292
|
+
Extracted JSON dictionary or None if extraction failed
|
|
293
|
+
"""
|
|
294
|
+
# Strategy 1: Extract JSON from code blocks with various patterns
|
|
295
|
+
json_patterns = [
|
|
296
|
+
r"```(?:json)?\s*(.*?)\s*```", # Standard code block
|
|
297
|
+
r"`json\s*(.*?)\s*`", # Single backtick with json tag
|
|
298
|
+
r"<json>\s*(.*?)\s*</json>", # XML-style tags
|
|
299
|
+
]
|
|
300
|
+
|
|
301
|
+
for pattern in json_patterns:
|
|
302
|
+
matches = re.findall(pattern, response, re.DOTALL)
|
|
303
|
+
for match in matches:
|
|
304
|
+
try:
|
|
305
|
+
result = json.loads(match)
|
|
306
|
+
# Ensure tool_args exists if tool is present
|
|
307
|
+
if "tool" in result and "tool_args" not in result:
|
|
308
|
+
result["tool_args"] = {}
|
|
309
|
+
logger.debug(f"Successfully extracted JSON with pattern {pattern}")
|
|
310
|
+
return result
|
|
311
|
+
except json.JSONDecodeError:
|
|
312
|
+
continue
|
|
313
|
+
|
|
314
|
+
start_idx = response.find("{")
|
|
315
|
+
if start_idx >= 0:
|
|
316
|
+
bracket_count = 0
|
|
317
|
+
in_string = False
|
|
318
|
+
escape_next = False
|
|
319
|
+
|
|
320
|
+
for i, char in enumerate(response[start_idx:], start_idx):
|
|
321
|
+
if escape_next:
|
|
322
|
+
escape_next = False
|
|
323
|
+
continue
|
|
324
|
+
if char == "\\":
|
|
325
|
+
escape_next = True
|
|
326
|
+
continue
|
|
327
|
+
if char == '"' and not escape_next:
|
|
328
|
+
in_string = not in_string
|
|
329
|
+
if not in_string:
|
|
330
|
+
if char == "{":
|
|
331
|
+
bracket_count += 1
|
|
332
|
+
elif char == "}":
|
|
333
|
+
bracket_count -= 1
|
|
334
|
+
if bracket_count == 0:
|
|
335
|
+
# Found complete JSON object
|
|
336
|
+
try:
|
|
337
|
+
extracted = response[start_idx : i + 1]
|
|
338
|
+
# Fix common issues before parsing
|
|
339
|
+
fixed = re.sub(r",\s*}", "}", extracted)
|
|
340
|
+
fixed = re.sub(r",\s*]", "]", fixed)
|
|
341
|
+
result = json.loads(fixed)
|
|
342
|
+
# Ensure tool_args exists if tool is present
|
|
343
|
+
if "tool" in result and "tool_args" not in result:
|
|
344
|
+
result["tool_args"] = {}
|
|
345
|
+
logger.debug(
|
|
346
|
+
"Successfully extracted JSON using bracket-matching"
|
|
347
|
+
)
|
|
348
|
+
return result
|
|
349
|
+
except json.JSONDecodeError as e:
|
|
350
|
+
logger.debug(f"Bracket-matched JSON parse failed: {e}")
|
|
351
|
+
break
|
|
352
|
+
|
|
353
|
+
return None
|
|
354
|
+
|
|
355
|
+
def validate_json_response(self, response_text: str) -> Dict[str, Any]:
    """
    Validates and attempts to fix JSON responses from the LLM.

    Attempts the following fixes in order:
    1. Parse as-is if valid JSON
    2. Extract JSON from code blocks
    3. Truncate after first complete JSON object
    4. Fix common JSON syntax errors
    5. Extract JSON-like content using regex

    Args:
        response_text: The response string from the LLM

    Returns:
        A dictionary containing the parsed JSON if valid

    Raises:
        ValueError: If the response cannot be parsed as JSON or is missing required fields
    """
    # Keep the untouched input only for the length comparison logged at the end.
    original_response = response_text
    json_was_modified = False

    # Step 0: Sanitize control characters to ensure proper JSON format
    def sanitize_json_string(text: str) -> str:
        """
        Ensure JSON strings have properly escaped control characters.

        Args:
            text: JSON text that may contain unescaped control characters

        Returns:
            Sanitized JSON text with properly escaped control characters
        """

        def escape_string_content(match):
            """Ensure control characters are properly escaped in JSON string values."""
            quote = match.group(1)
            content = match.group(2)
            closing_quote = match.group(3)

            # Ensure proper escaping of control characters.
            # Note: these replace literal (raw) control characters only;
            # already-escaped sequences like the two-char "\\n" are untouched.
            content = content.replace("\n", "\\n")
            content = content.replace("\r", "\\r")
            content = content.replace("\t", "\\t")
            content = content.replace("\b", "\\b")
            content = content.replace("\f", "\\f")

            return f"{quote}{content}{closing_quote}"

        # Match JSON strings: "..." handling escaped quotes.
        # Group 2 matches runs of non-quote/non-backslash chars, allowing
        # any backslash-escaped pair in between.
        pattern = r'(")([^"\\]*(?:\\.[^"\\]*)*)(")'

        try:
            return re.sub(pattern, escape_string_content, text)
        except Exception as e:
            # Sanitization is best-effort; fall back to the raw text on any failure.
            logger.debug(
                f"[JSON] String sanitization encountered issue: {e}, using original"
            )
            return text

    response_text = sanitize_json_string(response_text)

    # Step 1: Try to parse as-is
    try:
        json_response = json.loads(response_text)
        logger.debug("[JSON] Successfully parsed response without modifications")
    except json.JSONDecodeError as initial_error:
        # Step 2: Try to extract from code blocks (```json ... ``` or ``` ... ```)
        json_match = re.search(
            r"```(?:json)?\s*({.*?})\s*```", response_text, re.DOTALL
        )
        if json_match:
            try:
                response_text = json_match.group(1)
                json_response = json.loads(response_text)
                json_was_modified = True
                logger.warning("[JSON] Extracted JSON from code block")
            except json.JSONDecodeError as e:
                logger.debug(f"[JSON] Code block extraction failed: {e}")

        # Step 3: Try to find and extract first complete JSON object
        if not json_was_modified:
            # Find the first '{' and try to match brackets.
            # The scanner tracks string/escape state so braces inside
            # string values do not affect the bracket balance.
            start_idx = response_text.find("{")
            if start_idx >= 0:
                bracket_count = 0
                in_string = False
                escape_next = False

                for i, char in enumerate(response_text[start_idx:], start_idx):
                    if escape_next:
                        escape_next = False
                        continue
                    if char == "\\":
                        escape_next = True
                        continue
                    if char == '"' and not escape_next:
                        in_string = not in_string
                    if not in_string:
                        if char == "{":
                            bracket_count += 1
                        elif char == "}":
                            bracket_count -= 1
                            if bracket_count == 0:
                                # Found complete JSON object
                                try:
                                    truncated = response_text[start_idx : i + 1]
                                    json_response = json.loads(truncated)
                                    json_was_modified = True
                                    logger.warning(
                                        f"[JSON] Truncated response after first complete JSON object (removed {len(response_text) - i - 1} chars)"
                                    )
                                    response_text = truncated
                                    break
                                except json.JSONDecodeError:
                                    # Keep scanning: a later balanced span may parse.
                                    logger.debug(
                                        "[JSON] Truncated text is not valid JSON, trying next bracket pair"
                                    )
                                    continue

        # Step 4: Try to fix common JSON errors
        if not json_was_modified:
            fixed_text = response_text

            # Remove trailing commas
            fixed_text = re.sub(r",\s*}", "}", fixed_text)
            fixed_text = re.sub(r",\s*]", "]", fixed_text)

            # Fix single quotes to double quotes (carefully: only when no
            # double quotes exist at all, so valid strings are not corrupted)
            if "'" in fixed_text and '"' not in fixed_text:
                fixed_text = fixed_text.replace("'", '"')

            # Remove any text before first '{' or '['
            json_start = min(
                fixed_text.find("{") if "{" in fixed_text else len(fixed_text),
                fixed_text.find("[") if "[" in fixed_text else len(fixed_text),
            )
            if json_start > 0 and json_start < len(fixed_text):
                fixed_text = fixed_text[json_start:]

            # Try to parse the fixed text (only if any fix actually changed it)
            if fixed_text != response_text:
                try:
                    json_response = json.loads(fixed_text)
                    json_was_modified = True
                    logger.warning("[JSON] Applied automatic JSON fixes")
                    response_text = fixed_text
                except json.JSONDecodeError as e:
                    logger.debug(f"[JSON] Auto-fix failed: {e}")

        # If still no valid JSON, raise the original error
        if not json_was_modified:
            raise ValueError(
                f"Failed to parse response as JSON: {str(initial_error)}"
            )

    # Log warning if JSON was modified
    if json_was_modified:
        logger.warning(
            f"[JSON] Response was modified to extract valid JSON. Original length: {len(original_response)}, Fixed length: {len(response_text)}"
        )

    # Validate required fields
    # Note: 'goal' is optional for simple answer responses
    if "answer" in json_response:
        required_fields = ["thought", "answer"]  # goal is optional
    elif "tool" in json_response:
        required_fields = ["thought", "tool", "tool_args"]  # goal is optional
    else:
        required_fields = ["thought", "plan"]  # goal is optional

    missing_fields = [
        field for field in required_fields if field not in json_response
    ]
    if missing_fields:
        raise ValueError(
            f"Response is missing required fields: {', '.join(missing_fields)}"
        )

    return json_response
|
|
536
|
+
|
|
537
|
+
def _parse_llm_response(self, response: str) -> Dict[str, Any]:
    """
    Parse the LLM response to extract tool calls or conversational answers.

    ARCHITECTURE: Supports two response modes
    - Plain text for conversation (no JSON required)
    - JSON for tool invocations

    Args:
        response: The raw response from the LLM

    Returns:
        Parsed response as a dictionary
    """
    # Check for empty responses
    if not response or not response.strip():
        logger.warning("Empty LLM response received")
        self.error_history.append("Empty LLM response")

        # Provide more helpful error message based on context
        if hasattr(self, "api_mode") and self.api_mode:  # pylint: disable=no-member
            answer = "I encountered an issue processing your request. This might be due to a connection problem with the language model. Please try again."
        else:
            answer = "I apologize, but I received an empty response from the language model. Please try again."

        return {
            "thought": "LLM returned empty response",
            "goal": "Handle empty response error",
            "answer": answer,
        }

    response = response.strip()

    # Log what we received for debugging (show more to see full JSON)
    if len(response) > 500:
        logger.debug(
            f"📥 LLM Response ({len(response)} chars): {response[:500]}..."
        )
    else:
        logger.debug(f"📥 LLM Response: {response}")

    # STEP 1: Fast path - detect plain text conversational responses
    # If response doesn't start with '{', it's likely plain text
    # Accept it immediately without logging errors
    if not response.startswith("{"):
        logger.debug(
            f"[PARSE] Plain text conversational response (length: {len(response)})"
        )
        return {"thought": "", "goal": "", "answer": response}

    # STEP 2: Response starts with '{' - looks like JSON
    # Try direct JSON parsing first (fastest path)
    try:
        result = json.loads(response)
        # Ensure tool_args exists if tool is present
        if "tool" in result and "tool_args" not in result:
            result["tool_args"] = {}
        logger.debug("[PARSE] Valid JSON response")
        return result
    except json.JSONDecodeError:
        # JSON parsing failed - continue to extraction methods
        logger.debug("[PARSE] Malformed JSON, trying extraction")

    # STEP 3: Try JSON extraction methods (handles code blocks, mixed text, etc.)
    extracted_json = self._extract_json_from_response(response)
    if extracted_json:
        logger.debug("[PARSE] Extracted JSON successfully")
        return extracted_json

    # STEP 4: JSON was expected (starts with '{') but all parsing failed
    # Log error ONLY for JSON that couldn't be parsed
    # NOTE: these regexes only capture values without embedded escaped
    # quotes; values containing \" fall through to later fallbacks.
    logger.debug("Attempting to extract fields using regex")
    thought_match = re.search(r'"thought":\s*"([^"]*)"', response)
    tool_match = re.search(r'"tool":\s*"([^"]*)"', response)
    answer_match = re.search(r'"answer":\s*"([^"]*)"', response)
    plan_match = re.search(r'"plan":\s*(\[.*?\])', response, re.DOTALL)

    # An 'answer' field wins over a 'tool' field when both are present.
    if answer_match:
        result = {
            "thought": thought_match.group(1) if thought_match else "",
            "goal": "what was achieved",
            "answer": answer_match.group(1),
        }
        logger.debug(f"Extracted answer using regex: {result}")
        return result

    if tool_match:
        tool_args = {}

        tool_args_start = response.find('"tool_args"')

        if tool_args_start >= 0:
            # Find the opening brace after "tool_args":
            brace_start = response.find("{", tool_args_start)
            if brace_start >= 0:
                # Use bracket-matching to find the complete object
                # (tracks string/escape state so braces in strings don't count)
                bracket_count = 0
                in_string = False
                escape_next = False
                for i, char in enumerate(response[brace_start:], brace_start):
                    if escape_next:
                        escape_next = False
                        continue
                    if char == "\\":
                        escape_next = True
                        continue
                    if char == '"' and not escape_next:
                        in_string = not in_string
                    if not in_string:
                        if char == "{":
                            bracket_count += 1
                        elif char == "}":
                            bracket_count -= 1
                            if bracket_count == 0:
                                # Found complete tool_args object
                                tool_args_str = response[brace_start : i + 1]
                                try:
                                    tool_args = json.loads(tool_args_str)
                                except json.JSONDecodeError as e:
                                    # Unparseable tool_args: record it and
                                    # proceed with an empty dict.
                                    error_msg = f"Failed to parse tool_args JSON: {str(e)}, content: {tool_args_str[:100]}..."
                                    logger.error(error_msg)
                                    self.error_history.append(error_msg)
                                break

        result = {
            "thought": thought_match.group(1) if thought_match else "",
            "goal": "clear statement of what you're trying to achieve",
            "tool": tool_match.group(1),
            "tool_args": tool_args,
        }

        # Add plan if found
        if plan_match:
            try:
                result["plan"] = json.loads(plan_match.group(1))
                logger.debug(f"Extracted plan using regex: {result['plan']}")
            except json.JSONDecodeError as e:
                error_msg = f"Failed to parse plan JSON: {str(e)}, content: {plan_match.group(1)[:100]}..."
                logger.error(error_msg)
                self.error_history.append(error_msg)

        logger.debug(f"Extracted tool call using regex: {result}")
        return result

    # Try to match simple key-value patterns for object names (like ': "my_cube"')
    obj_name_match = re.search(
        r'["\':]?\s*["\'"]?([a-zA-Z0-9_\.]+)["\'"]?', response
    )
    if obj_name_match:
        object_name = obj_name_match.group(1)
        # If it looks like an object name and not just a random word
        if "." in object_name or "_" in object_name:
            logger.debug(f"Found potential object name: {object_name}")
            return {
                "thought": "Extracted object name",
                "goal": "Use the object name",
                "answer": object_name,
            }

    # CONVERSATIONAL MODE: No JSON found - treat as plain conversational response
    # This is normal and expected for chat agents responding to greetings, explanations, etc.
    logger.debug(
        f"[PARSE] No JSON structure found, treating as conversational response. Length: {len(response)}, preview: {response[:100]}..."
    )

    # If response is empty, provide a meaningful fallback
    # (defensive: response was already stripped and checked non-empty above,
    # so this branch is effectively unreachable — kept as a safety net)
    if not response.strip():
        logger.warning("[PARSE] Empty response received from LLM")
        return {
            "thought": "",
            "goal": "",
            "answer": "I apologize, but I received an empty response. Please try again.",
        }

    # Valid conversational response - wrap it in expected format
    return {"thought": "", "goal": "", "answer": response.strip()}
|
|
713
|
+
|
|
714
|
+
def _execute_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> Any:
    """
    Execute a tool by name with the provided arguments.

    Looks the tool up in the module-level ``_TOOL_REGISTRY``, verifies that
    every required (no-default) parameter is present in ``tool_args``, and
    invokes the tool. Failures are returned as error dicts rather than raised.

    Args:
        tool_name: Name of the tool to execute
        tool_args: Arguments to pass to the tool

    Returns:
        Result of the tool execution, or a dict of the form
        ``{"status": "error", "error": ...}`` on lookup/validation/execution
        failure (with ``"timeout": True`` added for subprocess timeouts).
    """
    logger.debug(f"Executing tool {tool_name} with args: {tool_args}")

    if tool_name not in _TOOL_REGISTRY:
        logger.error(f"Tool '{tool_name}' not found in registry")
        return {"status": "error", "error": f"Tool '{tool_name}' not found"}

    tool = _TOOL_REGISTRY[tool_name]["function"]
    sig = inspect.signature(tool)

    # Get required parameters: those without defaults, excluding *args/**kwargs.
    # FIX: VAR_POSITIONAL/VAR_KEYWORD parameters have no default but are never
    # required; the previous version wrongly treated them as missing arguments.
    # Also compare the `empty` sentinel with `is`, not `==`.
    required_args = {
        name: param
        for name, param in sig.parameters.items()
        if param.default is inspect.Parameter.empty
        and param.kind
        not in (
            inspect.Parameter.VAR_POSITIONAL,
            inspect.Parameter.VAR_KEYWORD,
        )
    }

    # Check for missing required arguments
    missing_args = [arg for arg in required_args if arg not in tool_args]
    if missing_args:
        error_msg = (
            f"Missing required arguments for {tool_name}: {', '.join(missing_args)}"
        )
        logger.error(error_msg)
        return {"status": "error", "error": error_msg}

    try:
        result = tool(**tool_args)
        logger.debug(f"Tool execution result: {result}")
        return result
    except subprocess.TimeoutExpired as e:
        # Handle subprocess timeout specifically so callers can detect it
        error_msg = f"Tool {tool_name} timed out: {str(e)}"
        logger.error(error_msg)
        self.error_history.append(error_msg)
        return {"status": "error", "error": error_msg, "timeout": True}
    except Exception as e:
        # Any other tool failure is recorded and reported, never propagated
        logger.error(f"Error executing tool {tool_name}: {str(e)}")
        self.error_history.append(str(e))
        return {"status": "error", "error": str(e)}
|
|
764
|
+
|
|
765
|
+
def _generate_max_steps_message(
|
|
766
|
+
self, conversation: List[Dict], steps_taken: int, steps_limit: int
|
|
767
|
+
) -> str:
|
|
768
|
+
"""Generate informative message when max steps is reached.
|
|
769
|
+
|
|
770
|
+
Args:
|
|
771
|
+
conversation: The conversation history
|
|
772
|
+
steps_taken: Number of steps actually taken
|
|
773
|
+
steps_limit: Maximum steps allowed
|
|
774
|
+
|
|
775
|
+
Returns:
|
|
776
|
+
Informative message about what was accomplished
|
|
777
|
+
"""
|
|
778
|
+
# Analyze what was done
|
|
779
|
+
tool_calls = [
|
|
780
|
+
msg
|
|
781
|
+
for msg in conversation
|
|
782
|
+
if msg.get("role") == "assistant" and "tool_calls" in msg
|
|
783
|
+
]
|
|
784
|
+
|
|
785
|
+
tools_used = []
|
|
786
|
+
for msg in tool_calls:
|
|
787
|
+
for tool_call in msg.get("tool_calls", []):
|
|
788
|
+
if "function" in tool_call:
|
|
789
|
+
tools_used.append(tool_call["function"]["name"])
|
|
790
|
+
|
|
791
|
+
message = f"⚠️ Reached maximum steps limit ({steps_limit} steps)\n\n"
|
|
792
|
+
message += f"Completed {steps_taken} steps using these tools:\n"
|
|
793
|
+
|
|
794
|
+
# Count tool usage
|
|
795
|
+
from collections import Counter
|
|
796
|
+
|
|
797
|
+
tool_counts = Counter(tools_used)
|
|
798
|
+
for tool, count in tool_counts.most_common(10):
|
|
799
|
+
message += f" - {tool}: {count}x\n"
|
|
800
|
+
|
|
801
|
+
message += "\nTo continue or complete this task:\n"
|
|
802
|
+
message += "1. Review the generated files and progress so far\n"
|
|
803
|
+
message += f"2. Run with --max-steps {steps_limit + 50} to allow more steps\n"
|
|
804
|
+
message += "3. Or complete remaining tasks manually\n"
|
|
805
|
+
|
|
806
|
+
return message
|
|
807
|
+
|
|
808
|
+
def _write_json_to_file(self, data: Dict[str, Any], filename: str = None) -> str:
|
|
809
|
+
"""
|
|
810
|
+
Write JSON data to a file and return the absolute path.
|
|
811
|
+
|
|
812
|
+
Args:
|
|
813
|
+
data: Dictionary data to write as JSON
|
|
814
|
+
filename: Optional filename, if None a timestamped name will be generated
|
|
815
|
+
|
|
816
|
+
Returns:
|
|
817
|
+
Absolute path to the saved file
|
|
818
|
+
"""
|
|
819
|
+
# Ensure output directory exists
|
|
820
|
+
os.makedirs(self.output_dir, exist_ok=True)
|
|
821
|
+
|
|
822
|
+
# Generate filename if not provided
|
|
823
|
+
if not filename:
|
|
824
|
+
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
825
|
+
filename = f"agent_output_{timestamp}.json"
|
|
826
|
+
|
|
827
|
+
# Ensure filename has .json extension
|
|
828
|
+
if not filename.endswith(".json"):
|
|
829
|
+
filename += ".json"
|
|
830
|
+
|
|
831
|
+
# Create absolute path
|
|
832
|
+
file_path = os.path.join(self.output_dir, filename)
|
|
833
|
+
|
|
834
|
+
# Write JSON data to file
|
|
835
|
+
with open(file_path, "w", encoding="utf-8") as f:
|
|
836
|
+
json.dump(data, f, indent=2)
|
|
837
|
+
|
|
838
|
+
return os.path.abspath(file_path)
|
|
839
|
+
|
|
840
|
+
def _handle_large_tool_result(
|
|
841
|
+
self,
|
|
842
|
+
tool_name: str,
|
|
843
|
+
tool_result: Any,
|
|
844
|
+
conversation: List[Dict[str, Any]],
|
|
845
|
+
tool_args: Optional[Dict[str, Any]] = None,
|
|
846
|
+
) -> Any:
|
|
847
|
+
"""
|
|
848
|
+
Handle large tool results by truncating them if necessary.
|
|
849
|
+
|
|
850
|
+
Args:
|
|
851
|
+
tool_name: Name of the executed tool
|
|
852
|
+
tool_result: The result from tool execution
|
|
853
|
+
conversation: The conversation list to append to
|
|
854
|
+
tool_args: Arguments passed to the tool (optional)
|
|
855
|
+
|
|
856
|
+
Returns:
|
|
857
|
+
The truncated result or original if within limits
|
|
858
|
+
"""
|
|
859
|
+
truncated_result = tool_result
|
|
860
|
+
if isinstance(tool_result, (dict, list)):
|
|
861
|
+
result_str = json.dumps(tool_result)
|
|
862
|
+
if (
|
|
863
|
+
len(result_str) > 30000
|
|
864
|
+
): # Threshold for truncation (appropriate for 32K context)
|
|
865
|
+
# Truncate large results to prevent overwhelming the LLM
|
|
866
|
+
truncated_str = self._truncate_large_content(
|
|
867
|
+
tool_result, max_chars=20000 # Increased for 32K context
|
|
868
|
+
)
|
|
869
|
+
try:
|
|
870
|
+
truncated_result = json.loads(truncated_str)
|
|
871
|
+
except json.JSONDecodeError:
|
|
872
|
+
# If truncated string isn't valid JSON, use it as-is
|
|
873
|
+
truncated_result = truncated_str
|
|
874
|
+
# Notify user about truncation
|
|
875
|
+
self.console.print_info(
|
|
876
|
+
f"Note: Large result ({len(result_str)} chars) truncated for LLM context"
|
|
877
|
+
)
|
|
878
|
+
if self.debug:
|
|
879
|
+
print(f"[DEBUG] Tool result truncated from {len(result_str)} chars")
|
|
880
|
+
|
|
881
|
+
# Add to conversation
|
|
882
|
+
tool_entry: Dict[str, Any] = {
|
|
883
|
+
"role": "tool",
|
|
884
|
+
"name": tool_name,
|
|
885
|
+
"content": truncated_result,
|
|
886
|
+
}
|
|
887
|
+
if tool_args is not None:
|
|
888
|
+
tool_entry["tool_args"] = tool_args
|
|
889
|
+
conversation.append(tool_entry)
|
|
890
|
+
return truncated_result
|
|
891
|
+
|
|
892
|
+
def _create_tool_message(self, tool_name: str, tool_output: Any) -> Dict[str, Any]:
|
|
893
|
+
"""
|
|
894
|
+
Build a message structure representing a tool output for downstream LLM calls.
|
|
895
|
+
"""
|
|
896
|
+
if isinstance(tool_output, str):
|
|
897
|
+
text_content = tool_output
|
|
898
|
+
else:
|
|
899
|
+
text_content = self._truncate_large_content(tool_output, max_chars=2000)
|
|
900
|
+
|
|
901
|
+
if not isinstance(text_content, str):
|
|
902
|
+
text_content = json.dumps(tool_output)
|
|
903
|
+
|
|
904
|
+
return {
|
|
905
|
+
"role": "tool",
|
|
906
|
+
"name": tool_name,
|
|
907
|
+
"tool_call_id": uuid.uuid4().hex,
|
|
908
|
+
"content": [{"type": "text", "text": text_content}],
|
|
909
|
+
}
|
|
910
|
+
|
|
911
|
+
def _truncate_large_content(self, content: Any, max_chars: int = 2000) -> str:
|
|
912
|
+
"""
|
|
913
|
+
Truncate large content to prevent overwhelming the LLM.
|
|
914
|
+
Defaults to 20000 chars which is appropriate for 32K token context window.
|
|
915
|
+
"""
|
|
916
|
+
|
|
917
|
+
# If we have test_results in the output we don't want to
|
|
918
|
+
# truncate as this can contain important information on
|
|
919
|
+
# how to fix the tests
|
|
920
|
+
if isinstance(content, dict) and (
|
|
921
|
+
"test_results" in content or "run_tests" in content
|
|
922
|
+
):
|
|
923
|
+
return json.dumps(content)
|
|
924
|
+
|
|
925
|
+
# Convert to string (use compact JSON first to check size)
|
|
926
|
+
if isinstance(content, (dict, list)):
|
|
927
|
+
compact_str = json.dumps(content)
|
|
928
|
+
# Only use indented format if we need to truncate anyway
|
|
929
|
+
content_str = (
|
|
930
|
+
json.dumps(content, indent=2)
|
|
931
|
+
if len(compact_str) > max_chars
|
|
932
|
+
else compact_str
|
|
933
|
+
)
|
|
934
|
+
else:
|
|
935
|
+
content_str = str(content)
|
|
936
|
+
|
|
937
|
+
# Return as-is if within limits
|
|
938
|
+
if len(content_str) <= max_chars:
|
|
939
|
+
return content_str
|
|
940
|
+
|
|
941
|
+
# For responses with chunks (e.g., search results, document retrieval)
|
|
942
|
+
if (
|
|
943
|
+
isinstance(content, dict)
|
|
944
|
+
and "chunks" in content
|
|
945
|
+
and isinstance(content["chunks"], list)
|
|
946
|
+
):
|
|
947
|
+
truncated = content.copy()
|
|
948
|
+
|
|
949
|
+
# Keep all chunks but truncate individual chunk content if needed
|
|
950
|
+
if "chunks" in truncated:
|
|
951
|
+
for chunk in truncated["chunks"]:
|
|
952
|
+
if isinstance(chunk, dict) and "content" in chunk:
|
|
953
|
+
# Keep full content for chunks (they're the actual data)
|
|
954
|
+
# Only truncate if a single chunk is massive
|
|
955
|
+
if len(chunk["content"]) > CHUNK_TRUNCATION_THRESHOLD:
|
|
956
|
+
chunk["content"] = (
|
|
957
|
+
chunk["content"][:CHUNK_TRUNCATION_SIZE]
|
|
958
|
+
+ "\n...[chunk truncated]...\n"
|
|
959
|
+
+ chunk["content"][-CHUNK_TRUNCATION_SIZE:]
|
|
960
|
+
)
|
|
961
|
+
|
|
962
|
+
result_str = json.dumps(truncated, indent=2)
|
|
963
|
+
# Use larger limit for chunked responses since chunks are the actual data
|
|
964
|
+
if len(result_str) <= max_chars * 3: # Allow up to 60KB for chunked data
|
|
965
|
+
return result_str
|
|
966
|
+
# If still too large, keep first 3 chunks only
|
|
967
|
+
truncated["chunks"] = truncated["chunks"][:3]
|
|
968
|
+
return json.dumps(truncated, indent=2)
|
|
969
|
+
|
|
970
|
+
# For Jira responses, keep first 3 issues
|
|
971
|
+
if (
|
|
972
|
+
isinstance(content, dict)
|
|
973
|
+
and "issues" in content
|
|
974
|
+
and isinstance(content["issues"], list)
|
|
975
|
+
):
|
|
976
|
+
truncated = {
|
|
977
|
+
**content,
|
|
978
|
+
"issues": content["issues"][:3],
|
|
979
|
+
"truncated": True,
|
|
980
|
+
"total": len(content["issues"]),
|
|
981
|
+
}
|
|
982
|
+
return json.dumps(truncated, indent=2)[:max_chars]
|
|
983
|
+
|
|
984
|
+
# For lists, keep first 3 items
|
|
985
|
+
if isinstance(content, list):
|
|
986
|
+
truncated = (
|
|
987
|
+
content[:3] + [{"truncated": f"{len(content) - 3} more"}]
|
|
988
|
+
if len(content) > 3
|
|
989
|
+
else content
|
|
990
|
+
)
|
|
991
|
+
return json.dumps(truncated, indent=2)[:max_chars]
|
|
992
|
+
|
|
993
|
+
# Simple truncation
|
|
994
|
+
half = max_chars // 2 - 20
|
|
995
|
+
return f"{content_str[:half]}\n...[truncated]...\n{content_str[-half:]}"
|
|
996
|
+
|
|
997
|
+
def process_query(
|
|
998
|
+
self,
|
|
999
|
+
user_input: str,
|
|
1000
|
+
max_steps: int = None,
|
|
1001
|
+
trace: bool = False,
|
|
1002
|
+
filename: str = None,
|
|
1003
|
+
) -> Dict[str, Any]:
|
|
1004
|
+
"""
|
|
1005
|
+
Process a user query and execute the necessary tools.
|
|
1006
|
+
Displays each step as it's being generated in real-time.
|
|
1007
|
+
|
|
1008
|
+
Args:
|
|
1009
|
+
user_input: User's query or request
|
|
1010
|
+
max_steps: Maximum number of steps to take in the conversation (overrides class default if provided)
|
|
1011
|
+
trace: If True, write detailed JSON trace to file
|
|
1012
|
+
filename: Optional filename for trace output, if None a timestamped name will be generated
|
|
1013
|
+
|
|
1014
|
+
Returns:
|
|
1015
|
+
Dict containing the final result and operation details
|
|
1016
|
+
"""
|
|
1017
|
+
import time
|
|
1018
|
+
|
|
1019
|
+
start_time = time.time() # Track query processing start time
|
|
1020
|
+
|
|
1021
|
+
logger.debug(f"Processing query: {user_input}")
|
|
1022
|
+
conversation = []
|
|
1023
|
+
# Build messages array for chat completions
|
|
1024
|
+
messages = []
|
|
1025
|
+
|
|
1026
|
+
# Prepopulate with conversation history if available (for session persistence)
|
|
1027
|
+
if hasattr(self, "conversation_history") and self.conversation_history:
|
|
1028
|
+
messages.extend(self.conversation_history)
|
|
1029
|
+
logger.debug(
|
|
1030
|
+
f"Loaded {len(self.conversation_history)} messages from conversation history"
|
|
1031
|
+
)
|
|
1032
|
+
|
|
1033
|
+
steps_taken = 0
|
|
1034
|
+
final_answer = None
|
|
1035
|
+
error_count = 0
|
|
1036
|
+
last_tool_call = None # Track the last tool call to prevent loops
|
|
1037
|
+
last_error = None # Track the last error to handle it properly
|
|
1038
|
+
previous_outputs = [] # Track previous tool outputs
|
|
1039
|
+
|
|
1040
|
+
# Reset state management
|
|
1041
|
+
self.execution_state = self.STATE_PLANNING
|
|
1042
|
+
self.current_plan = None
|
|
1043
|
+
self.current_step = 0
|
|
1044
|
+
self.total_plan_steps = 0
|
|
1045
|
+
self.plan_iterations = 0 # Reset plan iteration counter
|
|
1046
|
+
|
|
1047
|
+
# Add user query to the conversation history
|
|
1048
|
+
conversation.append({"role": "user", "content": user_input})
|
|
1049
|
+
messages.append({"role": "user", "content": user_input})
|
|
1050
|
+
|
|
1051
|
+
# Use provided max_steps or fall back to class default
|
|
1052
|
+
steps_limit = max_steps if max_steps is not None else self.max_steps
|
|
1053
|
+
|
|
1054
|
+
# Print initial message with max steps info
|
|
1055
|
+
self.console.print_processing_start(user_input, steps_limit)
|
|
1056
|
+
logger.debug(f"Using max_steps: {steps_limit}")
|
|
1057
|
+
|
|
1058
|
+
prompt = f"User request: {user_input}\n\n"
|
|
1059
|
+
|
|
1060
|
+
# Only add planning reminder in PLANNING state
|
|
1061
|
+
if self.execution_state == self.STATE_PLANNING:
|
|
1062
|
+
prompt += (
|
|
1063
|
+
"IMPORTANT: ALWAYS BEGIN WITH A PLAN before executing any tools.\n"
|
|
1064
|
+
"First create a detailed plan with all necessary steps, then execute the first step.\n"
|
|
1065
|
+
"When creating a plan with multiple steps:\n"
|
|
1066
|
+
" 1. ALWAYS follow the plan in the correct order, starting with the FIRST step.\n"
|
|
1067
|
+
" 2. Include both a plan and a 'tool' field, the 'tool' field MUST match the tool in the first step of the plan.\n"
|
|
1068
|
+
" 3. Create plans with clear, executable steps that include both the tool name and the exact arguments for each step.\n"
|
|
1069
|
+
)
|
|
1070
|
+
|
|
1071
|
+
logger.debug(f"Input prompt: {prompt[:200]}...")
|
|
1072
|
+
|
|
1073
|
+
# Process the query in steps, allowing for multiple tool usages
|
|
1074
|
+
while steps_taken < steps_limit and final_answer is None:
|
|
1075
|
+
# Build the next prompt based on current state (this is for fallback mode only)
|
|
1076
|
+
# In chat mode, we'll just add to messages array
|
|
1077
|
+
steps_taken += 1
|
|
1078
|
+
logger.debug(f"Step {steps_taken}/{steps_limit}")
|
|
1079
|
+
|
|
1080
|
+
# Check if we're at the limit and ask user if they want to continue
|
|
1081
|
+
if steps_taken == steps_limit and final_answer is None:
|
|
1082
|
+
# Show what was accomplished
|
|
1083
|
+
max_steps_msg = self._generate_max_steps_message(
|
|
1084
|
+
conversation, steps_taken, steps_limit
|
|
1085
|
+
)
|
|
1086
|
+
self.console.print_warning(max_steps_msg)
|
|
1087
|
+
|
|
1088
|
+
# Ask user if they want to continue (skip in silent mode OR if stdin is not available)
|
|
1089
|
+
# IMPORTANT: Never call input() in API/CI contexts to avoid blocking threads
|
|
1090
|
+
import sys
|
|
1091
|
+
|
|
1092
|
+
has_stdin = sys.stdin and sys.stdin.isatty()
|
|
1093
|
+
if has_stdin and not (
|
|
1094
|
+
hasattr(self, "silent_mode") and self.silent_mode
|
|
1095
|
+
):
|
|
1096
|
+
try:
|
|
1097
|
+
response = (
|
|
1098
|
+
input("\nContinue with 50 more steps? (y/n): ")
|
|
1099
|
+
.strip()
|
|
1100
|
+
.lower()
|
|
1101
|
+
)
|
|
1102
|
+
if response in ["y", "yes"]:
|
|
1103
|
+
steps_limit += 50
|
|
1104
|
+
self.console.print_info(
|
|
1105
|
+
f"✓ Continuing with {steps_limit} total steps...\n"
|
|
1106
|
+
)
|
|
1107
|
+
else:
|
|
1108
|
+
self.console.print_info("Stopping at user request.")
|
|
1109
|
+
break
|
|
1110
|
+
except (EOFError, KeyboardInterrupt):
|
|
1111
|
+
self.console.print_info("\nStopping at user request.")
|
|
1112
|
+
break
|
|
1113
|
+
else:
|
|
1114
|
+
# Silent mode - just stop
|
|
1115
|
+
break
|
|
1116
|
+
|
|
1117
|
+
# Display current step
|
|
1118
|
+
self.console.print_step_header(steps_taken, steps_limit)
|
|
1119
|
+
|
|
1120
|
+
# Skip automatic finalization for single-step plans - always request proper final answer
|
|
1121
|
+
|
|
1122
|
+
# If we're executing a plan, we might not need to query the LLM again
|
|
1123
|
+
if (
|
|
1124
|
+
self.execution_state == self.STATE_EXECUTING_PLAN
|
|
1125
|
+
and self.current_step < self.total_plan_steps
|
|
1126
|
+
):
|
|
1127
|
+
logger.debug(
|
|
1128
|
+
f"Executing plan step {self.current_step + 1}/{self.total_plan_steps}"
|
|
1129
|
+
)
|
|
1130
|
+
self.console.print_state_info(
|
|
1131
|
+
f"EXECUTING PLAN: Step {self.current_step + 1}/{self.total_plan_steps}"
|
|
1132
|
+
)
|
|
1133
|
+
|
|
1134
|
+
# Display the current plan with the current step highlighted
|
|
1135
|
+
if self.current_plan:
|
|
1136
|
+
self.console.print_plan(self.current_plan, self.current_step)
|
|
1137
|
+
|
|
1138
|
+
# Extract next step from plan
|
|
1139
|
+
next_step = self.current_plan[self.current_step]
|
|
1140
|
+
|
|
1141
|
+
if (
|
|
1142
|
+
isinstance(next_step, dict)
|
|
1143
|
+
and "tool" in next_step
|
|
1144
|
+
and "tool_args" in next_step
|
|
1145
|
+
):
|
|
1146
|
+
# We have a properly formatted step with tool and args
|
|
1147
|
+
tool_name = next_step["tool"]
|
|
1148
|
+
tool_args = next_step["tool_args"]
|
|
1149
|
+
|
|
1150
|
+
# Create a parsed response structure as if it came from the LLM
|
|
1151
|
+
parsed = {
|
|
1152
|
+
"thought": f"Executing step {self.current_step + 1} of the plan",
|
|
1153
|
+
"goal": f"Following the plan to {user_input}",
|
|
1154
|
+
"tool": tool_name,
|
|
1155
|
+
"tool_args": tool_args,
|
|
1156
|
+
}
|
|
1157
|
+
|
|
1158
|
+
# Add to conversation
|
|
1159
|
+
conversation.append({"role": "assistant", "content": parsed})
|
|
1160
|
+
|
|
1161
|
+
# Display the agent's reasoning for the step
|
|
1162
|
+
self.console.print_thought(
|
|
1163
|
+
parsed.get("thought", "Executing plan step")
|
|
1164
|
+
)
|
|
1165
|
+
self.console.print_goal(parsed.get("goal", "Following the plan"))
|
|
1166
|
+
|
|
1167
|
+
# Display the tool call in real-time
|
|
1168
|
+
self.console.print_tool_usage(tool_name)
|
|
1169
|
+
|
|
1170
|
+
# Start progress indicator for tool execution
|
|
1171
|
+
self.console.start_progress(f"Executing {tool_name}")
|
|
1172
|
+
|
|
1173
|
+
# Execute the tool
|
|
1174
|
+
tool_result = self._execute_tool(tool_name, tool_args)
|
|
1175
|
+
|
|
1176
|
+
# Stop progress indicator
|
|
1177
|
+
self.console.stop_progress()
|
|
1178
|
+
|
|
1179
|
+
# Handle domain-specific post-processing
|
|
1180
|
+
self._post_process_tool_result(tool_name, tool_args, tool_result)
|
|
1181
|
+
|
|
1182
|
+
# Handle large tool results
|
|
1183
|
+
truncated_result = self._handle_large_tool_result(
|
|
1184
|
+
tool_name, tool_result, conversation, tool_args
|
|
1185
|
+
)
|
|
1186
|
+
|
|
1187
|
+
# Add tool result to messages array so LLM can see it in next turn
|
|
1188
|
+
# Format as user message for proper chat flow (user → assistant → user → assistant)
|
|
1189
|
+
tool_result_content = f"Tool result: {json.dumps(truncated_result) if isinstance(truncated_result, dict) else truncated_result}"
|
|
1190
|
+
messages.append({"role": "user", "content": tool_result_content})
|
|
1191
|
+
|
|
1192
|
+
# Display the tool result in real-time (show full result to user)
|
|
1193
|
+
self.console.print_tool_complete()
|
|
1194
|
+
|
|
1195
|
+
self.console.pretty_print_json(tool_result, "Tool Result")
|
|
1196
|
+
|
|
1197
|
+
# Store the truncated output for future context
|
|
1198
|
+
previous_outputs.append(
|
|
1199
|
+
{
|
|
1200
|
+
"tool": tool_name,
|
|
1201
|
+
"args": tool_args,
|
|
1202
|
+
"result": truncated_result,
|
|
1203
|
+
}
|
|
1204
|
+
)
|
|
1205
|
+
|
|
1206
|
+
# Share tool output with subsequent LLM calls
|
|
1207
|
+
messages.append(
|
|
1208
|
+
self._create_tool_message(tool_name, truncated_result)
|
|
1209
|
+
)
|
|
1210
|
+
|
|
1211
|
+
# Check for error (support multiple error formats)
|
|
1212
|
+
is_error = isinstance(tool_result, dict) and (
|
|
1213
|
+
tool_result.get("status") == "error" # Standard format
|
|
1214
|
+
or tool_result.get("success")
|
|
1215
|
+
is False # Tools returning success: false
|
|
1216
|
+
or tool_result.get("has_errors") is True # CLI tools
|
|
1217
|
+
or tool_result.get("return_code", 0) != 0 # Build failures
|
|
1218
|
+
)
|
|
1219
|
+
|
|
1220
|
+
if is_error:
|
|
1221
|
+
error_count += 1
|
|
1222
|
+
# Extract error message from various formats
|
|
1223
|
+
last_error = (
|
|
1224
|
+
tool_result.get("error")
|
|
1225
|
+
or tool_result.get("stderr")
|
|
1226
|
+
or tool_result.get("hint") # Many tools provide hints
|
|
1227
|
+
or tool_result.get(
|
|
1228
|
+
"suggested_fix"
|
|
1229
|
+
) # Some tools provide fix suggestions
|
|
1230
|
+
or f"Command failed with return code {tool_result.get('return_code')}"
|
|
1231
|
+
)
|
|
1232
|
+
logger.warning(
|
|
1233
|
+
f"Tool execution error in plan (count: {error_count}): {last_error}"
|
|
1234
|
+
)
|
|
1235
|
+
self.console.print_error(last_error)
|
|
1236
|
+
|
|
1237
|
+
# Switch to error recovery state
|
|
1238
|
+
self.execution_state = self.STATE_ERROR_RECOVERY
|
|
1239
|
+
self.console.print_state_info(
|
|
1240
|
+
"ERROR RECOVERY: Handling tool execution failure"
|
|
1241
|
+
)
|
|
1242
|
+
|
|
1243
|
+
# Break out of plan execution to trigger error recovery prompt
|
|
1244
|
+
continue
|
|
1245
|
+
else:
|
|
1246
|
+
# Success - move to next step in plan
|
|
1247
|
+
self.current_step += 1
|
|
1248
|
+
|
|
1249
|
+
# Check if we've completed the plan
|
|
1250
|
+
if self.current_step >= self.total_plan_steps:
|
|
1251
|
+
logger.debug("Plan execution completed")
|
|
1252
|
+
self.execution_state = self.STATE_COMPLETION
|
|
1253
|
+
self.console.print_state_info(
|
|
1254
|
+
"COMPLETION: Plan fully executed"
|
|
1255
|
+
)
|
|
1256
|
+
|
|
1257
|
+
# Increment plan iteration counter
|
|
1258
|
+
self.plan_iterations += 1
|
|
1259
|
+
logger.debug(
|
|
1260
|
+
f"Plan iteration {self.plan_iterations} completed"
|
|
1261
|
+
)
|
|
1262
|
+
|
|
1263
|
+
# Check if we've reached max plan iterations
|
|
1264
|
+
reached_max_iterations = (
|
|
1265
|
+
self.max_plan_iterations > 0
|
|
1266
|
+
and self.plan_iterations >= self.max_plan_iterations
|
|
1267
|
+
)
|
|
1268
|
+
|
|
1269
|
+
# Prepare message for final answer with the completed plan context
|
|
1270
|
+
plan_context = {
|
|
1271
|
+
"completed_plan": self.current_plan,
|
|
1272
|
+
"total_steps": self.total_plan_steps,
|
|
1273
|
+
}
|
|
1274
|
+
plan_context_raw = json.dumps(plan_context)
|
|
1275
|
+
if len(plan_context_raw) > 20000:
|
|
1276
|
+
plan_context_str = self._truncate_large_content(
|
|
1277
|
+
plan_context, max_chars=20000
|
|
1278
|
+
)
|
|
1279
|
+
else:
|
|
1280
|
+
plan_context_str = plan_context_raw
|
|
1281
|
+
|
|
1282
|
+
if reached_max_iterations:
|
|
1283
|
+
# Force final answer after max iterations
|
|
1284
|
+
completion_message = (
|
|
1285
|
+
f"Maximum plan iterations ({self.max_plan_iterations}) reached for task: {user_input}\n"
|
|
1286
|
+
f"Task: {user_input}\n"
|
|
1287
|
+
f"Plan information:\n{plan_context_str}\n\n"
|
|
1288
|
+
f"IMPORTANT: You MUST now provide a final answer with an honest assessment:\n"
|
|
1289
|
+
f"- Summarize what was successfully accomplished\n"
|
|
1290
|
+
f"- Clearly state if anything remains incomplete or if errors occurred\n"
|
|
1291
|
+
f"- If the task is fully complete, state that clearly\n\n"
|
|
1292
|
+
f'Provide {{"thought": "...", "goal": "...", "answer": "..."}}'
|
|
1293
|
+
)
|
|
1294
|
+
else:
|
|
1295
|
+
completion_message = (
|
|
1296
|
+
"You have successfully completed all steps in the plan.\n"
|
|
1297
|
+
f"Task: {user_input}\n"
|
|
1298
|
+
f"Plan information:\n{plan_context_str}\n\n"
|
|
1299
|
+
f"Plan iteration: {self.plan_iterations}/{self.max_plan_iterations if self.max_plan_iterations > 0 else 'unlimited'}\n"
|
|
1300
|
+
"Check if more work is needed:\n"
|
|
1301
|
+
"- If the task is complete and verified, provide a final answer\n"
|
|
1302
|
+
"- If critical validation/testing is needed, you may create ONE more plan\n"
|
|
1303
|
+
"- Only create additional plans if absolutely necessary\n\n"
|
|
1304
|
+
'If more work needed: Provide a NEW plan with {{"thought": "...", "goal": "...", "plan": [...]}}\n'
|
|
1305
|
+
'If everything is complete: Provide {{"thought": "...", "goal": "...", "answer": "..."}}'
|
|
1306
|
+
)
|
|
1307
|
+
|
|
1308
|
+
# Debug logging - only show if truncation happened
|
|
1309
|
+
if self.debug and len(plan_context_raw) > 2000:
|
|
1310
|
+
print(
|
|
1311
|
+
"\n[DEBUG] Plan context truncated for completion message"
|
|
1312
|
+
)
|
|
1313
|
+
|
|
1314
|
+
# Add completion request to messages
|
|
1315
|
+
messages.append(
|
|
1316
|
+
{"role": "user", "content": completion_message}
|
|
1317
|
+
)
|
|
1318
|
+
|
|
1319
|
+
# Send the completion prompt to get final answer
|
|
1320
|
+
self.console.print_step_header(steps_taken, steps_limit)
|
|
1321
|
+
self.console.print_state_info(
|
|
1322
|
+
"COMPLETION: Requesting final answer"
|
|
1323
|
+
)
|
|
1324
|
+
|
|
1325
|
+
# Continue to next iteration to get final answer
|
|
1326
|
+
continue
|
|
1327
|
+
else:
|
|
1328
|
+
# Continue with next step - no need to query LLM again
|
|
1329
|
+
continue
|
|
1330
|
+
else:
|
|
1331
|
+
# Plan step doesn't have proper format, fall back to LLM
|
|
1332
|
+
logger.warning(
|
|
1333
|
+
f"Plan step {self.current_step + 1} doesn't have proper format: {next_step}"
|
|
1334
|
+
)
|
|
1335
|
+
self.console.print_warning(
|
|
1336
|
+
f"Plan step {self.current_step + 1} format incorrect, asking LLM for guidance"
|
|
1337
|
+
)
|
|
1338
|
+
prompt = (
|
|
1339
|
+
f"You are following a plan but step {self.current_step + 1} doesn't have proper format: {next_step}\n"
|
|
1340
|
+
"Please interpret this step and decide what tool to use next.\n\n"
|
|
1341
|
+
f"Task: {user_input}\n\n"
|
|
1342
|
+
)
|
|
1343
|
+
else:
|
|
1344
|
+
# Normal execution flow - query the LLM
|
|
1345
|
+
if self.execution_state == self.STATE_DIRECT_EXECUTION:
|
|
1346
|
+
self.console.print_state_info("DIRECT EXECUTION: Analyzing task")
|
|
1347
|
+
elif self.execution_state == self.STATE_PLANNING:
|
|
1348
|
+
self.console.print_state_info("PLANNING: Creating or refining plan")
|
|
1349
|
+
elif self.execution_state == self.STATE_ERROR_RECOVERY:
|
|
1350
|
+
self.console.print_state_info(
|
|
1351
|
+
"ERROR RECOVERY: Handling previous error"
|
|
1352
|
+
)
|
|
1353
|
+
|
|
1354
|
+
# Truncate previous outputs if too large to avoid overwhelming the LLM
|
|
1355
|
+
truncated_outputs = (
|
|
1356
|
+
self._truncate_large_content(previous_outputs, max_chars=500)
|
|
1357
|
+
if previous_outputs
|
|
1358
|
+
else "None"
|
|
1359
|
+
)
|
|
1360
|
+
|
|
1361
|
+
# Create a specific error recovery prompt
|
|
1362
|
+
prompt = (
|
|
1363
|
+
"TOOL EXECUTION FAILED!\n\n"
|
|
1364
|
+
f"You were trying to execute: {last_tool_call[0] if last_tool_call else 'unknown tool'}\n"
|
|
1365
|
+
f"Error: {last_error}\n\n"
|
|
1366
|
+
f"Original task: {user_input}\n\n"
|
|
1367
|
+
f"Current plan step {self.current_step + 1}/{self.total_plan_steps} failed.\n"
|
|
1368
|
+
f"Current plan: {self.current_plan}\n\n"
|
|
1369
|
+
f"Previous successful outputs: {truncated_outputs}\n\n"
|
|
1370
|
+
"INSTRUCTIONS:\n"
|
|
1371
|
+
"1. Analyze the error and understand what went wrong\n"
|
|
1372
|
+
"2. Create a NEW corrected plan that fixes the error\n"
|
|
1373
|
+
"3. Make sure to use correct tool parameters (check the available tools)\n"
|
|
1374
|
+
"4. Start executing the corrected plan\n\n"
|
|
1375
|
+
"Respond with your analysis, a corrected plan, and the first tool to execute."
|
|
1376
|
+
)
|
|
1377
|
+
|
|
1378
|
+
# Add the error recovery prompt to the messages array so it gets sent to LLM
|
|
1379
|
+
messages.append({"role": "user", "content": prompt})
|
|
1380
|
+
|
|
1381
|
+
# Reset state to planning after creating recovery prompt
|
|
1382
|
+
self.execution_state = self.STATE_PLANNING
|
|
1383
|
+
self.current_plan = None
|
|
1384
|
+
self.current_step = 0
|
|
1385
|
+
self.total_plan_steps = 0
|
|
1386
|
+
|
|
1387
|
+
elif self.execution_state == self.STATE_COMPLETION:
|
|
1388
|
+
self.console.print_state_info("COMPLETION: Finalizing response")
|
|
1389
|
+
|
|
1390
|
+
# Print the prompt if show_prompts is enabled (separate from debug_prompts)
|
|
1391
|
+
if self.show_prompts:
|
|
1392
|
+
# Build context from system prompt and messages
|
|
1393
|
+
context_parts = [
|
|
1394
|
+
(
|
|
1395
|
+
f"SYSTEM: {self.system_prompt[:200]}..."
|
|
1396
|
+
if len(self.system_prompt) > 200
|
|
1397
|
+
else f"SYSTEM: {self.system_prompt}"
|
|
1398
|
+
)
|
|
1399
|
+
]
|
|
1400
|
+
|
|
1401
|
+
for msg in messages:
|
|
1402
|
+
role = msg.get("role", "user").upper()
|
|
1403
|
+
content = str(msg.get("content", ""))[:150]
|
|
1404
|
+
context_parts.append(
|
|
1405
|
+
f"{role}: {content}{'...' if len(str(msg.get('content', ''))) > 150 else ''}"
|
|
1406
|
+
)
|
|
1407
|
+
|
|
1408
|
+
if not messages and prompt:
|
|
1409
|
+
context_parts.append(
|
|
1410
|
+
f"USER: {prompt[:150]}{'...' if len(prompt) > 150 else ''}"
|
|
1411
|
+
)
|
|
1412
|
+
|
|
1413
|
+
self.console.print_prompt("\n".join(context_parts), "LLM Context")
|
|
1414
|
+
|
|
1415
|
+
# Handle streaming or non-streaming LLM response
|
|
1416
|
+
# Initialize response_stats so it's always in scope
|
|
1417
|
+
response_stats = None
|
|
1418
|
+
|
|
1419
|
+
if self.streaming:
|
|
1420
|
+
# Streaming mode - raw response will be streamed
|
|
1421
|
+
# (SilentConsole will suppress this, AgentConsole will show it)
|
|
1422
|
+
|
|
1423
|
+
# Add prompt to conversation if debug is enabled
|
|
1424
|
+
if self.debug_prompts:
|
|
1425
|
+
conversation.append(
|
|
1426
|
+
{"role": "system", "content": {"prompt": prompt}}
|
|
1427
|
+
)
|
|
1428
|
+
# Print the prompt if show_prompts is enabled
|
|
1429
|
+
if self.show_prompts:
|
|
1430
|
+
self.console.print_prompt(
|
|
1431
|
+
prompt, f"Prompt (Step {steps_taken})"
|
|
1432
|
+
)
|
|
1433
|
+
|
|
1434
|
+
# Get streaming response from ChatSDK with proper conversation history
|
|
1435
|
+
try:
|
|
1436
|
+
response_stream = self.chat.send_messages_stream(
|
|
1437
|
+
messages=messages, system_prompt=self.system_prompt
|
|
1438
|
+
)
|
|
1439
|
+
|
|
1440
|
+
# Process the streaming response chunks as they arrive
|
|
1441
|
+
full_response = ""
|
|
1442
|
+
for chunk_response in response_stream:
|
|
1443
|
+
if chunk_response.is_complete:
|
|
1444
|
+
response_stats = chunk_response.stats
|
|
1445
|
+
else:
|
|
1446
|
+
self.console.print_streaming_text(chunk_response.text)
|
|
1447
|
+
full_response += chunk_response.text
|
|
1448
|
+
|
|
1449
|
+
self.console.print_streaming_text("", end_of_stream=True)
|
|
1450
|
+
response = full_response
|
|
1451
|
+
except ConnectionError as e:
|
|
1452
|
+
# Handle LLM server connection errors specifically
|
|
1453
|
+
error_msg = f"LLM Server Connection Failed (streaming): {str(e)}"
|
|
1454
|
+
logger.error(error_msg)
|
|
1455
|
+
self.console.print_error(error_msg)
|
|
1456
|
+
|
|
1457
|
+
# Add error to history
|
|
1458
|
+
self.error_history.append(
|
|
1459
|
+
{
|
|
1460
|
+
"step": steps_taken,
|
|
1461
|
+
"error": error_msg,
|
|
1462
|
+
"type": "llm_connection_error",
|
|
1463
|
+
}
|
|
1464
|
+
)
|
|
1465
|
+
|
|
1466
|
+
# Return error response
|
|
1467
|
+
final_answer = (
|
|
1468
|
+
f"Unable to complete task due to LLM server error: {str(e)}"
|
|
1469
|
+
)
|
|
1470
|
+
break
|
|
1471
|
+
except Exception as e:
|
|
1472
|
+
logger.error(f"Unexpected error during streaming: {e}")
|
|
1473
|
+
|
|
1474
|
+
# Add to error history
|
|
1475
|
+
self.error_history.append(
|
|
1476
|
+
{
|
|
1477
|
+
"step": steps_taken,
|
|
1478
|
+
"error": str(e),
|
|
1479
|
+
"type": "llm_streaming_error",
|
|
1480
|
+
}
|
|
1481
|
+
)
|
|
1482
|
+
|
|
1483
|
+
# Return error response
|
|
1484
|
+
final_answer = (
|
|
1485
|
+
f"Unable to complete task due to streaming error: {str(e)}"
|
|
1486
|
+
)
|
|
1487
|
+
break
|
|
1488
|
+
else:
|
|
1489
|
+
# Use progress indicator for non-streaming mode
|
|
1490
|
+
self.console.start_progress("Thinking")
|
|
1491
|
+
|
|
1492
|
+
# Debug logging before LLM call
|
|
1493
|
+
if self.debug:
|
|
1494
|
+
|
|
1495
|
+
print(f"\n[DEBUG] About to call LLM with {len(messages)} messages")
|
|
1496
|
+
print(
|
|
1497
|
+
f"[DEBUG] Last message role: {messages[-1]['role'] if messages else 'No messages'}"
|
|
1498
|
+
)
|
|
1499
|
+
if messages and len(messages[-1].get("content", "")) < 500:
|
|
1500
|
+
print(
|
|
1501
|
+
f"[DEBUG] Last message content: {messages[-1]['content']}"
|
|
1502
|
+
)
|
|
1503
|
+
else:
|
|
1504
|
+
print(
|
|
1505
|
+
f"[DEBUG] Last message content length: {len(messages[-1].get('content', ''))}"
|
|
1506
|
+
)
|
|
1507
|
+
print(f"[DEBUG] Execution state: {self.execution_state}")
|
|
1508
|
+
if self.execution_state == "PLANNING":
|
|
1509
|
+
print("[DEBUG] Current step: Planning (no active plan yet)")
|
|
1510
|
+
else:
|
|
1511
|
+
print(
|
|
1512
|
+
f"[DEBUG] Current step: {self.current_step}/{self.total_plan_steps}"
|
|
1513
|
+
)
|
|
1514
|
+
|
|
1515
|
+
# Get complete response from ChatSDK
|
|
1516
|
+
try:
|
|
1517
|
+
chat_response = self.chat.send_messages(
|
|
1518
|
+
messages=messages, system_prompt=self.system_prompt
|
|
1519
|
+
)
|
|
1520
|
+
response = chat_response.text
|
|
1521
|
+
response_stats = chat_response.stats
|
|
1522
|
+
except ConnectionError as e:
|
|
1523
|
+
error_msg = f"LLM Server Connection Failed: {str(e)}"
|
|
1524
|
+
logger.error(error_msg)
|
|
1525
|
+
self.console.print_error(error_msg)
|
|
1526
|
+
|
|
1527
|
+
# Add error to history and update state
|
|
1528
|
+
self.error_history.append(
|
|
1529
|
+
{
|
|
1530
|
+
"step": steps_taken,
|
|
1531
|
+
"error": error_msg,
|
|
1532
|
+
"type": "llm_connection_error",
|
|
1533
|
+
}
|
|
1534
|
+
)
|
|
1535
|
+
|
|
1536
|
+
# Return error response
|
|
1537
|
+
final_answer = (
|
|
1538
|
+
f"Unable to complete task due to LLM server error: {str(e)}"
|
|
1539
|
+
)
|
|
1540
|
+
break
|
|
1541
|
+
except Exception as e:
|
|
1542
|
+
if self.debug:
|
|
1543
|
+
print(f"[DEBUG] Error calling LLM: {e}")
|
|
1544
|
+
logger.error(f"Unexpected error calling LLM: {e}")
|
|
1545
|
+
|
|
1546
|
+
# Add to error history
|
|
1547
|
+
self.error_history.append(
|
|
1548
|
+
{"step": steps_taken, "error": str(e), "type": "llm_error"}
|
|
1549
|
+
)
|
|
1550
|
+
|
|
1551
|
+
# Return error response
|
|
1552
|
+
final_answer = f"Unable to complete task due to error: {str(e)}"
|
|
1553
|
+
break
|
|
1554
|
+
|
|
1555
|
+
# Stop the progress indicator
|
|
1556
|
+
self.console.stop_progress()
|
|
1557
|
+
|
|
1558
|
+
# Print the LLM response to the console
|
|
1559
|
+
logger.debug(f"LLM response: {response[:200]}...")
|
|
1560
|
+
if self.show_prompts:
|
|
1561
|
+
self.console.print_response(response, "LLM Response")
|
|
1562
|
+
|
|
1563
|
+
# Parse the response
|
|
1564
|
+
parsed = self._parse_llm_response(response)
|
|
1565
|
+
logger.debug(f"Parsed response: {parsed}")
|
|
1566
|
+
conversation.append({"role": "assistant", "content": parsed})
|
|
1567
|
+
|
|
1568
|
+
# Add assistant response to messages for chat history
|
|
1569
|
+
messages.append({"role": "assistant", "content": response})
|
|
1570
|
+
|
|
1571
|
+
# Validate the response has a plan if required
|
|
1572
|
+
self._validate_plan_required(parsed, steps_taken)
|
|
1573
|
+
|
|
1574
|
+
# If the LLM needs to create a plan first, re-prompt it specifically for that
|
|
1575
|
+
if "needs_plan" in parsed and parsed["needs_plan"]:
|
|
1576
|
+
# Prepare a special prompt that specifically requests a plan
|
|
1577
|
+
deferred_tool = parsed.get("deferred_tool", None)
|
|
1578
|
+
deferred_args = parsed.get("deferred_tool_args", {})
|
|
1579
|
+
|
|
1580
|
+
plan_prompt = (
|
|
1581
|
+
"You MUST create a detailed plan first before taking any action.\n\n"
|
|
1582
|
+
f"User request: {user_input}\n\n"
|
|
1583
|
+
)
|
|
1584
|
+
|
|
1585
|
+
if deferred_tool:
|
|
1586
|
+
plan_prompt += (
|
|
1587
|
+
f"You initially wanted to use the {deferred_tool} tool with these arguments:\n"
|
|
1588
|
+
f"{json.dumps(deferred_args, indent=2)}\n\n"
|
|
1589
|
+
"However, you MUST first create a plan. Please create a plan that includes this tool usage as a step.\n\n"
|
|
1590
|
+
)
|
|
1591
|
+
|
|
1592
|
+
plan_prompt += (
|
|
1593
|
+
"Create a detailed plan with all necessary steps in JSON format, including exact tool names and arguments.\n"
|
|
1594
|
+
"Respond with your reasoning, plan, and the first tool to use."
|
|
1595
|
+
)
|
|
1596
|
+
|
|
1597
|
+
# Store the plan prompt in conversation if debug is enabled
|
|
1598
|
+
if self.debug_prompts:
|
|
1599
|
+
conversation.append(
|
|
1600
|
+
{"role": "system", "content": {"prompt": plan_prompt}}
|
|
1601
|
+
)
|
|
1602
|
+
if self.show_prompts:
|
|
1603
|
+
self.console.print_prompt(plan_prompt, "Plan Request Prompt")
|
|
1604
|
+
|
|
1605
|
+
# Notify the user we're asking for a plan
|
|
1606
|
+
self.console.print_info("Requesting a detailed plan before proceeding")
|
|
1607
|
+
|
|
1608
|
+
# Get the planning response
|
|
1609
|
+
if self.streaming:
|
|
1610
|
+
# Add prompt to conversation if debug is enabled
|
|
1611
|
+
if self.debug_prompts:
|
|
1612
|
+
conversation.append(
|
|
1613
|
+
{"role": "system", "content": {"prompt": plan_prompt}}
|
|
1614
|
+
)
|
|
1615
|
+
# Print the prompt if show_prompts is enabled
|
|
1616
|
+
if self.show_prompts:
|
|
1617
|
+
self.console.print_prompt(
|
|
1618
|
+
plan_prompt, f"Prompt (Step {steps_taken})"
|
|
1619
|
+
)
|
|
1620
|
+
|
|
1621
|
+
# Handle streaming as before
|
|
1622
|
+
full_response = ""
|
|
1623
|
+
# Add plan request to messages
|
|
1624
|
+
messages.append({"role": "user", "content": plan_prompt})
|
|
1625
|
+
|
|
1626
|
+
# Use ChatSDK for streaming plan response
|
|
1627
|
+
stream_gen = self.chat.send_messages_stream(
|
|
1628
|
+
messages=messages, system_prompt=self.system_prompt
|
|
1629
|
+
)
|
|
1630
|
+
|
|
1631
|
+
for chunk_response in stream_gen:
|
|
1632
|
+
if not chunk_response.is_complete:
|
|
1633
|
+
chunk = chunk_response.text
|
|
1634
|
+
if hasattr(self.console, "print_streaming_text"):
|
|
1635
|
+
self.console.print_streaming_text(chunk)
|
|
1636
|
+
else:
|
|
1637
|
+
print(chunk, end="", flush=True)
|
|
1638
|
+
full_response += chunk
|
|
1639
|
+
|
|
1640
|
+
if hasattr(self.console, "print_streaming_text"):
|
|
1641
|
+
self.console.print_streaming_text("", end_of_stream=True)
|
|
1642
|
+
else:
|
|
1643
|
+
print("", flush=True)
|
|
1644
|
+
|
|
1645
|
+
plan_response = full_response
|
|
1646
|
+
else:
|
|
1647
|
+
# Use progress indicator for non-streaming mode
|
|
1648
|
+
self.console.start_progress("Creating plan")
|
|
1649
|
+
|
|
1650
|
+
# Store the plan prompt in conversation if debug is enabled
|
|
1651
|
+
if self.debug_prompts:
|
|
1652
|
+
conversation.append(
|
|
1653
|
+
{"role": "system", "content": {"prompt": plan_prompt}}
|
|
1654
|
+
)
|
|
1655
|
+
if self.show_prompts:
|
|
1656
|
+
self.console.print_prompt(
|
|
1657
|
+
plan_prompt, "Plan Request Prompt"
|
|
1658
|
+
)
|
|
1659
|
+
|
|
1660
|
+
# Add plan request to messages
|
|
1661
|
+
messages.append({"role": "user", "content": plan_prompt})
|
|
1662
|
+
|
|
1663
|
+
# Use ChatSDK for non-streaming plan response
|
|
1664
|
+
chat_response = self.chat.send_messages(
|
|
1665
|
+
messages=messages, system_prompt=self.system_prompt
|
|
1666
|
+
)
|
|
1667
|
+
plan_response = chat_response.text
|
|
1668
|
+
self.console.stop_progress()
|
|
1669
|
+
|
|
1670
|
+
# Parse the plan response
|
|
1671
|
+
parsed_plan = self._parse_llm_response(plan_response)
|
|
1672
|
+
logger.debug(f"Parsed plan response: {parsed_plan}")
|
|
1673
|
+
conversation.append({"role": "assistant", "content": parsed_plan})
|
|
1674
|
+
|
|
1675
|
+
# Add plan response to messages for chat history
|
|
1676
|
+
messages.append({"role": "assistant", "content": plan_response})
|
|
1677
|
+
|
|
1678
|
+
# Display the agent's reasoning for the plan
|
|
1679
|
+
self.console.print_thought(parsed_plan.get("thought", "Creating plan"))
|
|
1680
|
+
self.console.print_goal(parsed_plan.get("goal", "Planning for task"))
|
|
1681
|
+
|
|
1682
|
+
# Set the parsed response to the new plan for further processing
|
|
1683
|
+
parsed = parsed_plan
|
|
1684
|
+
|
|
1685
|
+
# Display the agent's reasoning in real-time (only if provided)
|
|
1686
|
+
thought = parsed.get("thought", "").strip()
|
|
1687
|
+
goal = parsed.get("goal", "").strip()
|
|
1688
|
+
|
|
1689
|
+
if thought and thought != "No explicit reasoning provided":
|
|
1690
|
+
self.console.print_thought(thought)
|
|
1691
|
+
|
|
1692
|
+
if goal and goal != "No explicit goal provided":
|
|
1693
|
+
self.console.print_goal(goal)
|
|
1694
|
+
|
|
1695
|
+
# Process plan if available
|
|
1696
|
+
if "plan" in parsed:
|
|
1697
|
+
# Validate that plan is actually a list, not a string or other type
|
|
1698
|
+
if not isinstance(parsed["plan"], list):
|
|
1699
|
+
logger.error(
|
|
1700
|
+
f"Invalid plan format: expected list, got {type(parsed['plan']).__name__}. "
|
|
1701
|
+
f"Plan content: {parsed['plan']}"
|
|
1702
|
+
)
|
|
1703
|
+
self.console.print_error(
|
|
1704
|
+
f"LLM returned invalid plan format (expected array, got {type(parsed['plan']).__name__}). "
|
|
1705
|
+
"Asking for correction..."
|
|
1706
|
+
)
|
|
1707
|
+
|
|
1708
|
+
# Create error recovery prompt
|
|
1709
|
+
error_msg = (
|
|
1710
|
+
"ERROR: You provided a plan in the wrong format.\n"
|
|
1711
|
+
"Expected: an array of step objects\n"
|
|
1712
|
+
f"You provided: {type(parsed['plan']).__name__}\n\n"
|
|
1713
|
+
"The correct format is:\n"
|
|
1714
|
+
f'{{"plan": [{{"tool": "tool_name", "tool_args": {{...}}, "description": "..."}}]}}\n\n'
|
|
1715
|
+
f"Please create a proper plan as an array of step objects for: {user_input}"
|
|
1716
|
+
)
|
|
1717
|
+
messages.append({"role": "user", "content": error_msg})
|
|
1718
|
+
|
|
1719
|
+
# Continue to next iteration to get corrected plan
|
|
1720
|
+
continue
|
|
1721
|
+
|
|
1722
|
+
# Validate that plan items are dictionaries with required fields
|
|
1723
|
+
invalid_steps = []
|
|
1724
|
+
for i, step in enumerate(parsed["plan"]):
|
|
1725
|
+
if not isinstance(step, dict):
|
|
1726
|
+
invalid_steps.append((i, type(step).__name__, step))
|
|
1727
|
+
elif "tool" not in step or "tool_args" not in step:
|
|
1728
|
+
invalid_steps.append((i, "missing fields", step))
|
|
1729
|
+
|
|
1730
|
+
if invalid_steps:
|
|
1731
|
+
logger.error(f"Invalid plan steps found: {invalid_steps}")
|
|
1732
|
+
self.console.print_error(
|
|
1733
|
+
f"Plan contains {len(invalid_steps)} invalid step(s). Asking for correction..."
|
|
1734
|
+
)
|
|
1735
|
+
|
|
1736
|
+
# Create detailed error message
|
|
1737
|
+
error_details = "\n".join(
|
|
1738
|
+
[
|
|
1739
|
+
f"Step {i+1}: {issue} - {step}"
|
|
1740
|
+
for i, issue, step in invalid_steps[
|
|
1741
|
+
:3
|
|
1742
|
+
] # Show first 3 errors
|
|
1743
|
+
]
|
|
1744
|
+
)
|
|
1745
|
+
|
|
1746
|
+
error_msg = (
|
|
1747
|
+
f"ERROR: Your plan contains invalid steps:\n{error_details}\n\n"
|
|
1748
|
+
f"Each step must be a dictionary with 'tool' and 'tool_args' fields:\n"
|
|
1749
|
+
f'{{"tool": "tool_name", "tool_args": {{...}}, "description": "..."}}\n\n'
|
|
1750
|
+
f"Please create a corrected plan for: {user_input}"
|
|
1751
|
+
)
|
|
1752
|
+
messages.append({"role": "user", "content": error_msg})
|
|
1753
|
+
|
|
1754
|
+
# Continue to next iteration to get corrected plan
|
|
1755
|
+
continue
|
|
1756
|
+
|
|
1757
|
+
# Plan is valid - proceed with execution
|
|
1758
|
+
self.current_plan = parsed["plan"]
|
|
1759
|
+
self.current_step = 0
|
|
1760
|
+
self.total_plan_steps = len(self.current_plan)
|
|
1761
|
+
self.execution_state = self.STATE_EXECUTING_PLAN
|
|
1762
|
+
logger.debug(
|
|
1763
|
+
f"New plan created with {self.total_plan_steps} steps: {self.current_plan}"
|
|
1764
|
+
)
|
|
1765
|
+
|
|
1766
|
+
# If the response contains a tool call, execute it
|
|
1767
|
+
if "tool" in parsed and "tool_args" in parsed:
|
|
1768
|
+
|
|
1769
|
+
# Display the current plan with the current step highlighted
|
|
1770
|
+
if self.current_plan:
|
|
1771
|
+
self.console.print_plan(self.current_plan, self.current_step)
|
|
1772
|
+
|
|
1773
|
+
# When both plan and tool are present, prioritize the plan execution
|
|
1774
|
+
# If we have a plan, we should execute from the plan, not the standalone tool call
|
|
1775
|
+
if "plan" in parsed and self.current_plan and self.total_plan_steps > 0:
|
|
1776
|
+
# Skip the standalone tool execution and let the plan execution handle it
|
|
1777
|
+
# The plan execution logic will handle this in the next iteration
|
|
1778
|
+
logger.debug(
|
|
1779
|
+
"Plan and tool both present - deferring to plan execution logic"
|
|
1780
|
+
)
|
|
1781
|
+
continue # Skip tool execution, let plan execution handle it
|
|
1782
|
+
|
|
1783
|
+
# If this was a single-step plan, mark as completed after tool execution
|
|
1784
|
+
if self.total_plan_steps == 1:
|
|
1785
|
+
logger.debug(
|
|
1786
|
+
"Single-step plan will be marked completed after tool execution"
|
|
1787
|
+
)
|
|
1788
|
+
self.execution_state = self.STATE_COMPLETION
|
|
1789
|
+
|
|
1790
|
+
tool_name = parsed["tool"]
|
|
1791
|
+
tool_args = parsed["tool_args"]
|
|
1792
|
+
logger.debug(f"Tool call detected: {tool_name} with args {tool_args}")
|
|
1793
|
+
|
|
1794
|
+
# Display the tool call in real-time
|
|
1795
|
+
self.console.print_tool_usage(tool_name)
|
|
1796
|
+
|
|
1797
|
+
if tool_args:
|
|
1798
|
+
self.console.pretty_print_json(tool_args, "Arguments")
|
|
1799
|
+
|
|
1800
|
+
# Start progress indicator for tool execution
|
|
1801
|
+
self.console.start_progress(f"Executing {tool_name}")
|
|
1802
|
+
|
|
1803
|
+
# Check for repeated tool calls
|
|
1804
|
+
if last_tool_call == (tool_name, str(tool_args)):
|
|
1805
|
+
# Stop progress indicator
|
|
1806
|
+
self.console.stop_progress()
|
|
1807
|
+
|
|
1808
|
+
logger.warning(f"Detected repeated tool call: {tool_name}")
|
|
1809
|
+
# Force a final answer if the same tool is called repeatedly
|
|
1810
|
+
final_answer = (
|
|
1811
|
+
f"Task completed with {tool_name}. No further action needed."
|
|
1812
|
+
)
|
|
1813
|
+
|
|
1814
|
+
self.console.print_repeated_tool_warning()
|
|
1815
|
+
break
|
|
1816
|
+
|
|
1817
|
+
# Execute the tool
|
|
1818
|
+
tool_result = self._execute_tool(tool_name, tool_args)
|
|
1819
|
+
|
|
1820
|
+
# Stop progress indicator
|
|
1821
|
+
self.console.stop_progress()
|
|
1822
|
+
|
|
1823
|
+
# Handle domain-specific post-processing
|
|
1824
|
+
self._post_process_tool_result(tool_name, tool_args, tool_result)
|
|
1825
|
+
|
|
1826
|
+
# Handle large tool results
|
|
1827
|
+
truncated_result = self._handle_large_tool_result(
|
|
1828
|
+
tool_name, tool_result, conversation, tool_args
|
|
1829
|
+
)
|
|
1830
|
+
|
|
1831
|
+
# Display the tool result in real-time (show full result to user)
|
|
1832
|
+
self.console.print_tool_complete()
|
|
1833
|
+
|
|
1834
|
+
self.console.pretty_print_json(tool_result, "Result")
|
|
1835
|
+
|
|
1836
|
+
# Store the truncated output for future context
|
|
1837
|
+
previous_outputs.append(
|
|
1838
|
+
{"tool": tool_name, "args": tool_args, "result": truncated_result}
|
|
1839
|
+
)
|
|
1840
|
+
|
|
1841
|
+
# Share tool output with subsequent LLM calls
|
|
1842
|
+
messages.append(self._create_tool_message(tool_name, truncated_result))
|
|
1843
|
+
|
|
1844
|
+
# Update last tool call
|
|
1845
|
+
last_tool_call = (tool_name, str(tool_args))
|
|
1846
|
+
|
|
1847
|
+
# For single-step plans, we still need to let the LLM process the result
|
|
1848
|
+
# This is especially important for RAG queries where the LLM needs to
|
|
1849
|
+
# synthesize the retrieved information into a coherent answer
|
|
1850
|
+
if (
|
|
1851
|
+
self.execution_state == self.STATE_COMPLETION
|
|
1852
|
+
and self.total_plan_steps == 1
|
|
1853
|
+
):
|
|
1854
|
+
logger.debug(
|
|
1855
|
+
"Single-step plan execution completed, requesting final answer from LLM"
|
|
1856
|
+
)
|
|
1857
|
+
# Don't break here - let the loop continue so the LLM can process the tool result
|
|
1858
|
+
# The tool result has already been added to messages, so the next iteration
|
|
1859
|
+
# will call the LLM with that result
|
|
1860
|
+
|
|
1861
|
+
# Check if tool execution resulted in an error (support multiple error formats)
|
|
1862
|
+
is_error = isinstance(tool_result, dict) and (
|
|
1863
|
+
tool_result.get("status") == "error"
|
|
1864
|
+
or tool_result.get("success") is False
|
|
1865
|
+
or tool_result.get("has_errors") is True
|
|
1866
|
+
or tool_result.get("return_code", 0) != 0
|
|
1867
|
+
)
|
|
1868
|
+
if is_error:
|
|
1869
|
+
error_count += 1
|
|
1870
|
+
last_error = (
|
|
1871
|
+
tool_result.get("error")
|
|
1872
|
+
or tool_result.get("stderr")
|
|
1873
|
+
or tool_result.get("hint")
|
|
1874
|
+
or tool_result.get("suggested_fix")
|
|
1875
|
+
or f"Command failed with return code {tool_result.get('return_code')}"
|
|
1876
|
+
)
|
|
1877
|
+
logger.warning(
|
|
1878
|
+
f"Tool execution error in plan (count: {error_count}): {last_error}"
|
|
1879
|
+
)
|
|
1880
|
+
self.console.print_error(last_error)
|
|
1881
|
+
|
|
1882
|
+
# Switch to error recovery state
|
|
1883
|
+
self.execution_state = self.STATE_ERROR_RECOVERY
|
|
1884
|
+
self.console.print_state_info(
|
|
1885
|
+
"ERROR RECOVERY: Handling tool execution failure"
|
|
1886
|
+
)
|
|
1887
|
+
|
|
1888
|
+
# Break out of tool execution to trigger error recovery prompt
|
|
1889
|
+
continue
|
|
1890
|
+
|
|
1891
|
+
# Collect and store performance stats for token tracking
|
|
1892
|
+
# Do this BEFORE checking for final answer so stats are always collected
|
|
1893
|
+
perf_stats = response_stats or self.chat.get_stats()
|
|
1894
|
+
if perf_stats:
|
|
1895
|
+
conversation.append(
|
|
1896
|
+
{
|
|
1897
|
+
"role": "system",
|
|
1898
|
+
"content": {
|
|
1899
|
+
"type": "stats",
|
|
1900
|
+
"step": steps_taken,
|
|
1901
|
+
"performance_stats": perf_stats,
|
|
1902
|
+
},
|
|
1903
|
+
}
|
|
1904
|
+
)
|
|
1905
|
+
|
|
1906
|
+
# Check for final answer (after collecting stats)
|
|
1907
|
+
if "answer" in parsed:
|
|
1908
|
+
final_answer = parsed["answer"]
|
|
1909
|
+
self.execution_state = self.STATE_COMPLETION
|
|
1910
|
+
self.console.print_final_answer(final_answer, streaming=self.streaming)
|
|
1911
|
+
break
|
|
1912
|
+
|
|
1913
|
+
# Validate plan required
|
|
1914
|
+
self._validate_plan_required(parsed, steps_taken)
|
|
1915
|
+
|
|
1916
|
+
# Print completion message
|
|
1917
|
+
self.console.print_completion(steps_taken, steps_limit)
|
|
1918
|
+
|
|
1919
|
+
# Calculate total duration
|
|
1920
|
+
total_duration = time.time() - start_time
|
|
1921
|
+
|
|
1922
|
+
# Aggregate token counts from conversation stats
|
|
1923
|
+
total_input_tokens = 0
|
|
1924
|
+
total_output_tokens = 0
|
|
1925
|
+
for entry in conversation:
|
|
1926
|
+
if entry.get("role") == "system" and isinstance(entry.get("content"), dict):
|
|
1927
|
+
content = entry["content"]
|
|
1928
|
+
if content.get("type") == "stats" and "performance_stats" in content:
|
|
1929
|
+
stats = content["performance_stats"]
|
|
1930
|
+
if stats.get("input_tokens") is not None:
|
|
1931
|
+
total_input_tokens += stats["input_tokens"]
|
|
1932
|
+
if stats.get("output_tokens") is not None:
|
|
1933
|
+
total_output_tokens += stats["output_tokens"]
|
|
1934
|
+
|
|
1935
|
+
# Return the result
|
|
1936
|
+
has_errors = len(self.error_history) > 0
|
|
1937
|
+
has_valid_answer = (
|
|
1938
|
+
final_answer and final_answer.strip()
|
|
1939
|
+
) # Check for non-empty answer
|
|
1940
|
+
result = {
|
|
1941
|
+
"status": (
|
|
1942
|
+
"success"
|
|
1943
|
+
if has_valid_answer and not has_errors
|
|
1944
|
+
else ("failed" if has_errors else "incomplete")
|
|
1945
|
+
),
|
|
1946
|
+
"result": (
|
|
1947
|
+
final_answer
|
|
1948
|
+
if final_answer
|
|
1949
|
+
else self._generate_max_steps_message(
|
|
1950
|
+
conversation, steps_taken, steps_limit
|
|
1951
|
+
)
|
|
1952
|
+
),
|
|
1953
|
+
"system_prompt": self.system_prompt, # Include system prompt in the result
|
|
1954
|
+
"conversation": conversation,
|
|
1955
|
+
"steps_taken": steps_taken,
|
|
1956
|
+
"duration": total_duration, # Total query processing time in seconds
|
|
1957
|
+
"input_tokens": total_input_tokens, # Total input tokens across all steps
|
|
1958
|
+
"output_tokens": total_output_tokens, # Total output tokens across all steps
|
|
1959
|
+
"total_tokens": total_input_tokens
|
|
1960
|
+
+ total_output_tokens, # Combined token count
|
|
1961
|
+
"error_count": len(self.error_history),
|
|
1962
|
+
"error_history": self.error_history, # Include the full error history
|
|
1963
|
+
}
|
|
1964
|
+
|
|
1965
|
+
# Write trace to file if requested
|
|
1966
|
+
if trace:
|
|
1967
|
+
file_path = self._write_json_to_file(result, filename)
|
|
1968
|
+
result["output_file"] = file_path
|
|
1969
|
+
|
|
1970
|
+
logger.debug(f"Query processing complete: {result}")
|
|
1971
|
+
|
|
1972
|
+
# Store the result internally
|
|
1973
|
+
self.last_result = result
|
|
1974
|
+
|
|
1975
|
+
return result
|
|
1976
|
+
|
|
1977
|
+
def _post_process_tool_result(
|
|
1978
|
+
self, _tool_name: str, _tool_args: Dict[str, Any], _tool_result: Dict[str, Any]
|
|
1979
|
+
) -> None:
|
|
1980
|
+
"""
|
|
1981
|
+
Post-process the tool result for domain-specific handling.
|
|
1982
|
+
Override this in subclasses to provide domain-specific behavior.
|
|
1983
|
+
|
|
1984
|
+
Args:
|
|
1985
|
+
_tool_name: Name of the tool that was executed
|
|
1986
|
+
_tool_args: Arguments that were passed to the tool
|
|
1987
|
+
_tool_result: Result returned by the tool
|
|
1988
|
+
"""
|
|
1989
|
+
...
|
|
1990
|
+
|
|
1991
|
+
def display_result(
|
|
1992
|
+
self,
|
|
1993
|
+
title: str = "Result",
|
|
1994
|
+
result: Dict[str, Any] = None,
|
|
1995
|
+
print_result: bool = False,
|
|
1996
|
+
) -> None:
|
|
1997
|
+
"""
|
|
1998
|
+
Display the result and output file path information.
|
|
1999
|
+
|
|
2000
|
+
Args:
|
|
2001
|
+
title: Optional title for the result panel
|
|
2002
|
+
result: Optional result dictionary to display. If None, uses the last stored result.
|
|
2003
|
+
print_result: If True, print the result to the console
|
|
2004
|
+
"""
|
|
2005
|
+
# Use the provided result or fall back to the last stored result
|
|
2006
|
+
display_result = result if result is not None else self.last_result
|
|
2007
|
+
|
|
2008
|
+
if display_result is None:
|
|
2009
|
+
self.console.print_warning("No result available to display.")
|
|
2010
|
+
return
|
|
2011
|
+
|
|
2012
|
+
# Print the full result with syntax highlighting
|
|
2013
|
+
if print_result:
|
|
2014
|
+
self.console.pretty_print_json(display_result, title)
|
|
2015
|
+
|
|
2016
|
+
# If there's an output file, display its path after the result
|
|
2017
|
+
if "output_file" in display_result:
|
|
2018
|
+
self.console.print_info(
|
|
2019
|
+
f"Output written to: {display_result['output_file']}"
|
|
2020
|
+
)
|
|
2021
|
+
|
|
2022
|
+
def get_error_history(self) -> List[str]:
|
|
2023
|
+
"""
|
|
2024
|
+
Get the history of errors encountered by the agent.
|
|
2025
|
+
|
|
2026
|
+
Returns:
|
|
2027
|
+
List of error messages
|
|
2028
|
+
"""
|
|
2029
|
+
return self.error_history
|
|
2030
|
+
|
|
2031
|
+
    def _validate_plan_required(self, parsed: Dict[str, Any], step: int) -> None:
        """
        Validate that the response includes a plan when required by the agent.

        Side effects (mutates ``parsed`` in place when a plan is missing on
        step 1): sets ``parsed["needs_plan"] = True``; if a tool call is
        present it is moved to ``parsed["deferred_tool"]`` /
        ``parsed["deferred_tool_args"]`` and the ``"tool"`` / ``"tool_args"``
        keys are deleted so the caller will not execute it this step.

        Args:
            parsed: The parsed response from the LLM
            step: The current step number
        """
        # Skip validation if we're not in planning mode or if we're already executing a plan
        if self.execution_state != self.STATE_PLANNING or self.current_plan is not None:
            return

        # Allow simple single-tool operations without requiring a plan
        if "tool" in parsed and step == 1:
            tool_name = parsed.get("tool", "")
            # List of tools that can execute directly without a plan
            # (class-level allowlist; subclasses can override SIMPLE_TOOLS)
            simple_tools = self.SIMPLE_TOOLS
            if tool_name in simple_tools:
                logger.debug(f"Allowing direct execution of simple tool: {tool_name}")
                return

        # Check if plan is missing on the first step
        # BUT: Allow direct answers without plans (for simple conversational queries)
        if "plan" not in parsed and "answer" not in parsed and step == 1:
            warning_msg = f"No plan found in step {step} response. The agent should create a plan for all tasks."
            logger.warning(warning_msg)
            self.console.print_warning(warning_msg)

            # For the first step, we'll add a flag to indicate we need to re-prompt for a plan
            parsed["needs_plan"] = True

            # If there's a tool in the response, store it but don't execute it yet
            # (deferred so the re-prompt can still reference the intended call)
            if "tool" in parsed:
                parsed["deferred_tool"] = parsed["tool"]
                parsed["deferred_tool_args"] = parsed.get("tool_args", {})
                # Remove the tool so it won't be executed
                del parsed["tool"]
                if "tool_args" in parsed:
                    del parsed["tool_args"]

            # Set state to indicate we need planning
            # NOTE(review): the guard above already requires STATE_PLANNING, so
            # this assignment looks redundant — presumably kept for clarity or
            # in case the guard changes; confirm before removing.
            self.execution_state = self.STATE_PLANNING
|