amd-gaia 0.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- amd_gaia-0.14.1.dist-info/METADATA +768 -0
- amd_gaia-0.14.1.dist-info/RECORD +800 -0
- amd_gaia-0.14.1.dist-info/WHEEL +5 -0
- amd_gaia-0.14.1.dist-info/entry_points.txt +5 -0
- amd_gaia-0.14.1.dist-info/licenses/LICENSE.md +21 -0
- amd_gaia-0.14.1.dist-info/top_level.txt +1 -0
- gaia/__init__.py +2 -0
- gaia/agents/__init__.py +19 -0
- gaia/agents/base/__init__.py +9 -0
- gaia/agents/base/agent.py +2072 -0
- gaia/agents/base/api_agent.py +120 -0
- gaia/agents/base/console.py +1457 -0
- gaia/agents/base/mcp_agent.py +86 -0
- gaia/agents/base/tools.py +83 -0
- gaia/agents/blender/agent.py +556 -0
- gaia/agents/blender/agent_simple.py +135 -0
- gaia/agents/blender/app.py +211 -0
- gaia/agents/blender/app_simple.py +41 -0
- gaia/agents/blender/core/__init__.py +16 -0
- gaia/agents/blender/core/materials.py +506 -0
- gaia/agents/blender/core/objects.py +316 -0
- gaia/agents/blender/core/rendering.py +225 -0
- gaia/agents/blender/core/scene.py +220 -0
- gaia/agents/blender/core/view.py +146 -0
- gaia/agents/chat/__init__.py +9 -0
- gaia/agents/chat/agent.py +975 -0
- gaia/agents/chat/app.py +1058 -0
- gaia/agents/chat/session.py +508 -0
- gaia/agents/chat/tools/__init__.py +15 -0
- gaia/agents/chat/tools/file_tools.py +96 -0
- gaia/agents/chat/tools/rag_tools.py +1729 -0
- gaia/agents/chat/tools/shell_tools.py +436 -0
- gaia/agents/code/__init__.py +7 -0
- gaia/agents/code/agent.py +547 -0
- gaia/agents/code/app.py +266 -0
- gaia/agents/code/models.py +135 -0
- gaia/agents/code/orchestration/__init__.py +24 -0
- gaia/agents/code/orchestration/checklist_executor.py +1739 -0
- gaia/agents/code/orchestration/checklist_generator.py +709 -0
- gaia/agents/code/orchestration/factories/__init__.py +9 -0
- gaia/agents/code/orchestration/factories/base.py +63 -0
- gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -0
- gaia/agents/code/orchestration/factories/python_factory.py +106 -0
- gaia/agents/code/orchestration/orchestrator.py +610 -0
- gaia/agents/code/orchestration/project_analyzer.py +391 -0
- gaia/agents/code/orchestration/steps/__init__.py +67 -0
- gaia/agents/code/orchestration/steps/base.py +188 -0
- gaia/agents/code/orchestration/steps/error_handler.py +314 -0
- gaia/agents/code/orchestration/steps/nextjs.py +828 -0
- gaia/agents/code/orchestration/steps/python.py +307 -0
- gaia/agents/code/orchestration/template_catalog.py +463 -0
- gaia/agents/code/orchestration/workflows/__init__.py +14 -0
- gaia/agents/code/orchestration/workflows/base.py +80 -0
- gaia/agents/code/orchestration/workflows/nextjs.py +186 -0
- gaia/agents/code/orchestration/workflows/python.py +94 -0
- gaia/agents/code/prompts/__init__.py +11 -0
- gaia/agents/code/prompts/base_prompt.py +77 -0
- gaia/agents/code/prompts/code_patterns.py +1925 -0
- gaia/agents/code/prompts/nextjs_prompt.py +40 -0
- gaia/agents/code/prompts/python_prompt.py +109 -0
- gaia/agents/code/schema_inference.py +365 -0
- gaia/agents/code/system_prompt.py +41 -0
- gaia/agents/code/tools/__init__.py +42 -0
- gaia/agents/code/tools/cli_tools.py +1138 -0
- gaia/agents/code/tools/code_formatting.py +319 -0
- gaia/agents/code/tools/code_tools.py +769 -0
- gaia/agents/code/tools/error_fixing.py +1347 -0
- gaia/agents/code/tools/external_tools.py +180 -0
- gaia/agents/code/tools/file_io.py +845 -0
- gaia/agents/code/tools/prisma_tools.py +190 -0
- gaia/agents/code/tools/project_management.py +1016 -0
- gaia/agents/code/tools/testing.py +321 -0
- gaia/agents/code/tools/typescript_tools.py +122 -0
- gaia/agents/code/tools/validation_parsing.py +461 -0
- gaia/agents/code/tools/validation_tools.py +803 -0
- gaia/agents/code/tools/web_dev_tools.py +1744 -0
- gaia/agents/code/validators/__init__.py +16 -0
- gaia/agents/code/validators/antipattern_checker.py +241 -0
- gaia/agents/code/validators/ast_analyzer.py +197 -0
- gaia/agents/code/validators/requirements_validator.py +145 -0
- gaia/agents/code/validators/syntax_validator.py +171 -0
- gaia/agents/docker/__init__.py +7 -0
- gaia/agents/docker/agent.py +642 -0
- gaia/agents/jira/__init__.py +11 -0
- gaia/agents/jira/agent.py +894 -0
- gaia/agents/jira/jql_templates.py +299 -0
- gaia/agents/routing/__init__.py +7 -0
- gaia/agents/routing/agent.py +512 -0
- gaia/agents/routing/system_prompt.py +75 -0
- gaia/api/__init__.py +23 -0
- gaia/api/agent_registry.py +238 -0
- gaia/api/app.py +305 -0
- gaia/api/openai_server.py +575 -0
- gaia/api/schemas.py +186 -0
- gaia/api/sse_handler.py +370 -0
- gaia/apps/__init__.py +4 -0
- gaia/apps/llm/__init__.py +6 -0
- gaia/apps/llm/app.py +169 -0
- gaia/apps/summarize/app.py +633 -0
- gaia/apps/summarize/html_viewer.py +133 -0
- gaia/apps/summarize/pdf_formatter.py +284 -0
- gaia/audio/__init__.py +2 -0
- gaia/audio/audio_client.py +439 -0
- gaia/audio/audio_recorder.py +269 -0
- gaia/audio/kokoro_tts.py +599 -0
- gaia/audio/whisper_asr.py +432 -0
- gaia/chat/__init__.py +16 -0
- gaia/chat/app.py +430 -0
- gaia/chat/prompts.py +522 -0
- gaia/chat/sdk.py +1200 -0
- gaia/cli.py +5621 -0
- gaia/eval/batch_experiment.py +2332 -0
- gaia/eval/claude.py +542 -0
- gaia/eval/config.py +37 -0
- gaia/eval/email_generator.py +512 -0
- gaia/eval/eval.py +3179 -0
- gaia/eval/groundtruth.py +1130 -0
- gaia/eval/transcript_generator.py +582 -0
- gaia/eval/webapp/README.md +168 -0
- gaia/eval/webapp/node_modules/.bin/mime +16 -0
- gaia/eval/webapp/node_modules/.bin/mime.cmd +17 -0
- gaia/eval/webapp/node_modules/.bin/mime.ps1 +28 -0
- gaia/eval/webapp/node_modules/.package-lock.json +865 -0
- gaia/eval/webapp/node_modules/accepts/HISTORY.md +243 -0
- gaia/eval/webapp/node_modules/accepts/LICENSE +23 -0
- gaia/eval/webapp/node_modules/accepts/README.md +140 -0
- gaia/eval/webapp/node_modules/accepts/index.js +238 -0
- gaia/eval/webapp/node_modules/accepts/package.json +47 -0
- gaia/eval/webapp/node_modules/array-flatten/LICENSE +21 -0
- gaia/eval/webapp/node_modules/array-flatten/README.md +43 -0
- gaia/eval/webapp/node_modules/array-flatten/array-flatten.js +64 -0
- gaia/eval/webapp/node_modules/array-flatten/package.json +39 -0
- gaia/eval/webapp/node_modules/body-parser/HISTORY.md +672 -0
- gaia/eval/webapp/node_modules/body-parser/LICENSE +23 -0
- gaia/eval/webapp/node_modules/body-parser/README.md +476 -0
- gaia/eval/webapp/node_modules/body-parser/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/body-parser/index.js +156 -0
- gaia/eval/webapp/node_modules/body-parser/lib/read.js +205 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/json.js +247 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/raw.js +101 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/text.js +121 -0
- gaia/eval/webapp/node_modules/body-parser/lib/types/urlencoded.js +307 -0
- gaia/eval/webapp/node_modules/body-parser/package.json +56 -0
- gaia/eval/webapp/node_modules/bytes/History.md +97 -0
- gaia/eval/webapp/node_modules/bytes/LICENSE +23 -0
- gaia/eval/webapp/node_modules/bytes/Readme.md +152 -0
- gaia/eval/webapp/node_modules/bytes/index.js +170 -0
- gaia/eval/webapp/node_modules/bytes/package.json +42 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.eslintrc +17 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/.nycrc +9 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/CHANGELOG.md +30 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/LICENSE +21 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/README.md +62 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/actualApply.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/actualApply.js +10 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/applyBind.d.ts +19 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/applyBind.js +10 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionApply.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionApply.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionCall.d.ts +1 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/functionCall.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/index.d.ts +64 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/index.js +15 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/package.json +85 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/reflectApply.d.ts +3 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/reflectApply.js +4 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/test/index.js +63 -0
- gaia/eval/webapp/node_modules/call-bind-apply-helpers/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/call-bound/.eslintrc +13 -0
- gaia/eval/webapp/node_modules/call-bound/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/call-bound/.nycrc +9 -0
- gaia/eval/webapp/node_modules/call-bound/CHANGELOG.md +42 -0
- gaia/eval/webapp/node_modules/call-bound/LICENSE +21 -0
- gaia/eval/webapp/node_modules/call-bound/README.md +53 -0
- gaia/eval/webapp/node_modules/call-bound/index.d.ts +94 -0
- gaia/eval/webapp/node_modules/call-bound/index.js +19 -0
- gaia/eval/webapp/node_modules/call-bound/package.json +99 -0
- gaia/eval/webapp/node_modules/call-bound/test/index.js +61 -0
- gaia/eval/webapp/node_modules/call-bound/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/content-disposition/HISTORY.md +60 -0
- gaia/eval/webapp/node_modules/content-disposition/LICENSE +22 -0
- gaia/eval/webapp/node_modules/content-disposition/README.md +142 -0
- gaia/eval/webapp/node_modules/content-disposition/index.js +458 -0
- gaia/eval/webapp/node_modules/content-disposition/package.json +44 -0
- gaia/eval/webapp/node_modules/content-type/HISTORY.md +29 -0
- gaia/eval/webapp/node_modules/content-type/LICENSE +22 -0
- gaia/eval/webapp/node_modules/content-type/README.md +94 -0
- gaia/eval/webapp/node_modules/content-type/index.js +225 -0
- gaia/eval/webapp/node_modules/content-type/package.json +42 -0
- gaia/eval/webapp/node_modules/cookie/LICENSE +24 -0
- gaia/eval/webapp/node_modules/cookie/README.md +317 -0
- gaia/eval/webapp/node_modules/cookie/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/cookie/index.js +334 -0
- gaia/eval/webapp/node_modules/cookie/package.json +44 -0
- gaia/eval/webapp/node_modules/cookie-signature/.npmignore +4 -0
- gaia/eval/webapp/node_modules/cookie-signature/History.md +38 -0
- gaia/eval/webapp/node_modules/cookie-signature/Readme.md +42 -0
- gaia/eval/webapp/node_modules/cookie-signature/index.js +51 -0
- gaia/eval/webapp/node_modules/cookie-signature/package.json +18 -0
- gaia/eval/webapp/node_modules/debug/.coveralls.yml +1 -0
- gaia/eval/webapp/node_modules/debug/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/debug/.npmignore +9 -0
- gaia/eval/webapp/node_modules/debug/.travis.yml +14 -0
- gaia/eval/webapp/node_modules/debug/CHANGELOG.md +362 -0
- gaia/eval/webapp/node_modules/debug/LICENSE +19 -0
- gaia/eval/webapp/node_modules/debug/Makefile +50 -0
- gaia/eval/webapp/node_modules/debug/README.md +312 -0
- gaia/eval/webapp/node_modules/debug/component.json +19 -0
- gaia/eval/webapp/node_modules/debug/karma.conf.js +70 -0
- gaia/eval/webapp/node_modules/debug/node.js +1 -0
- gaia/eval/webapp/node_modules/debug/package.json +49 -0
- gaia/eval/webapp/node_modules/debug/src/browser.js +185 -0
- gaia/eval/webapp/node_modules/debug/src/debug.js +202 -0
- gaia/eval/webapp/node_modules/debug/src/index.js +10 -0
- gaia/eval/webapp/node_modules/debug/src/inspector-log.js +15 -0
- gaia/eval/webapp/node_modules/debug/src/node.js +248 -0
- gaia/eval/webapp/node_modules/depd/History.md +103 -0
- gaia/eval/webapp/node_modules/depd/LICENSE +22 -0
- gaia/eval/webapp/node_modules/depd/Readme.md +280 -0
- gaia/eval/webapp/node_modules/depd/index.js +538 -0
- gaia/eval/webapp/node_modules/depd/lib/browser/index.js +77 -0
- gaia/eval/webapp/node_modules/depd/package.json +45 -0
- gaia/eval/webapp/node_modules/destroy/LICENSE +23 -0
- gaia/eval/webapp/node_modules/destroy/README.md +63 -0
- gaia/eval/webapp/node_modules/destroy/index.js +209 -0
- gaia/eval/webapp/node_modules/destroy/package.json +48 -0
- gaia/eval/webapp/node_modules/dunder-proto/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/dunder-proto/.nycrc +13 -0
- gaia/eval/webapp/node_modules/dunder-proto/CHANGELOG.md +24 -0
- gaia/eval/webapp/node_modules/dunder-proto/LICENSE +21 -0
- gaia/eval/webapp/node_modules/dunder-proto/README.md +54 -0
- gaia/eval/webapp/node_modules/dunder-proto/get.d.ts +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/get.js +30 -0
- gaia/eval/webapp/node_modules/dunder-proto/package.json +76 -0
- gaia/eval/webapp/node_modules/dunder-proto/set.d.ts +5 -0
- gaia/eval/webapp/node_modules/dunder-proto/set.js +35 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/get.js +34 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/index.js +4 -0
- gaia/eval/webapp/node_modules/dunder-proto/test/set.js +50 -0
- gaia/eval/webapp/node_modules/dunder-proto/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/ee-first/LICENSE +22 -0
- gaia/eval/webapp/node_modules/ee-first/README.md +80 -0
- gaia/eval/webapp/node_modules/ee-first/index.js +95 -0
- gaia/eval/webapp/node_modules/ee-first/package.json +29 -0
- gaia/eval/webapp/node_modules/encodeurl/LICENSE +22 -0
- gaia/eval/webapp/node_modules/encodeurl/README.md +109 -0
- gaia/eval/webapp/node_modules/encodeurl/index.js +60 -0
- gaia/eval/webapp/node_modules/encodeurl/package.json +40 -0
- gaia/eval/webapp/node_modules/es-define-property/.eslintrc +13 -0
- gaia/eval/webapp/node_modules/es-define-property/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-define-property/.nycrc +9 -0
- gaia/eval/webapp/node_modules/es-define-property/CHANGELOG.md +29 -0
- gaia/eval/webapp/node_modules/es-define-property/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-define-property/README.md +49 -0
- gaia/eval/webapp/node_modules/es-define-property/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-define-property/index.js +14 -0
- gaia/eval/webapp/node_modules/es-define-property/package.json +81 -0
- gaia/eval/webapp/node_modules/es-define-property/test/index.js +56 -0
- gaia/eval/webapp/node_modules/es-define-property/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/es-errors/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/es-errors/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-errors/CHANGELOG.md +40 -0
- gaia/eval/webapp/node_modules/es-errors/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-errors/README.md +55 -0
- gaia/eval/webapp/node_modules/es-errors/eval.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/eval.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/index.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/package.json +80 -0
- gaia/eval/webapp/node_modules/es-errors/range.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/range.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/ref.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/ref.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/syntax.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/syntax.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/test/index.js +19 -0
- gaia/eval/webapp/node_modules/es-errors/tsconfig.json +49 -0
- gaia/eval/webapp/node_modules/es-errors/type.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/type.js +4 -0
- gaia/eval/webapp/node_modules/es-errors/uri.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-errors/uri.js +4 -0
- gaia/eval/webapp/node_modules/es-object-atoms/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/es-object-atoms/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/es-object-atoms/CHANGELOG.md +37 -0
- gaia/eval/webapp/node_modules/es-object-atoms/LICENSE +21 -0
- gaia/eval/webapp/node_modules/es-object-atoms/README.md +63 -0
- gaia/eval/webapp/node_modules/es-object-atoms/RequireObjectCoercible.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/RequireObjectCoercible.js +11 -0
- gaia/eval/webapp/node_modules/es-object-atoms/ToObject.d.ts +7 -0
- gaia/eval/webapp/node_modules/es-object-atoms/ToObject.js +10 -0
- gaia/eval/webapp/node_modules/es-object-atoms/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/index.js +4 -0
- gaia/eval/webapp/node_modules/es-object-atoms/isObject.d.ts +3 -0
- gaia/eval/webapp/node_modules/es-object-atoms/isObject.js +6 -0
- gaia/eval/webapp/node_modules/es-object-atoms/package.json +80 -0
- gaia/eval/webapp/node_modules/es-object-atoms/test/index.js +38 -0
- gaia/eval/webapp/node_modules/es-object-atoms/tsconfig.json +6 -0
- gaia/eval/webapp/node_modules/escape-html/LICENSE +24 -0
- gaia/eval/webapp/node_modules/escape-html/Readme.md +43 -0
- gaia/eval/webapp/node_modules/escape-html/index.js +78 -0
- gaia/eval/webapp/node_modules/escape-html/package.json +24 -0
- gaia/eval/webapp/node_modules/etag/HISTORY.md +83 -0
- gaia/eval/webapp/node_modules/etag/LICENSE +22 -0
- gaia/eval/webapp/node_modules/etag/README.md +159 -0
- gaia/eval/webapp/node_modules/etag/index.js +131 -0
- gaia/eval/webapp/node_modules/etag/package.json +47 -0
- gaia/eval/webapp/node_modules/express/History.md +3656 -0
- gaia/eval/webapp/node_modules/express/LICENSE +24 -0
- gaia/eval/webapp/node_modules/express/Readme.md +260 -0
- gaia/eval/webapp/node_modules/express/index.js +11 -0
- gaia/eval/webapp/node_modules/express/lib/application.js +661 -0
- gaia/eval/webapp/node_modules/express/lib/express.js +116 -0
- gaia/eval/webapp/node_modules/express/lib/middleware/init.js +43 -0
- gaia/eval/webapp/node_modules/express/lib/middleware/query.js +47 -0
- gaia/eval/webapp/node_modules/express/lib/request.js +525 -0
- gaia/eval/webapp/node_modules/express/lib/response.js +1179 -0
- gaia/eval/webapp/node_modules/express/lib/router/index.js +673 -0
- gaia/eval/webapp/node_modules/express/lib/router/layer.js +181 -0
- gaia/eval/webapp/node_modules/express/lib/router/route.js +230 -0
- gaia/eval/webapp/node_modules/express/lib/utils.js +303 -0
- gaia/eval/webapp/node_modules/express/lib/view.js +182 -0
- gaia/eval/webapp/node_modules/express/package.json +102 -0
- gaia/eval/webapp/node_modules/finalhandler/HISTORY.md +210 -0
- gaia/eval/webapp/node_modules/finalhandler/LICENSE +22 -0
- gaia/eval/webapp/node_modules/finalhandler/README.md +147 -0
- gaia/eval/webapp/node_modules/finalhandler/SECURITY.md +25 -0
- gaia/eval/webapp/node_modules/finalhandler/index.js +341 -0
- gaia/eval/webapp/node_modules/finalhandler/package.json +47 -0
- gaia/eval/webapp/node_modules/forwarded/HISTORY.md +21 -0
- gaia/eval/webapp/node_modules/forwarded/LICENSE +22 -0
- gaia/eval/webapp/node_modules/forwarded/README.md +57 -0
- gaia/eval/webapp/node_modules/forwarded/index.js +90 -0
- gaia/eval/webapp/node_modules/forwarded/package.json +45 -0
- gaia/eval/webapp/node_modules/fresh/HISTORY.md +70 -0
- gaia/eval/webapp/node_modules/fresh/LICENSE +23 -0
- gaia/eval/webapp/node_modules/fresh/README.md +119 -0
- gaia/eval/webapp/node_modules/fresh/index.js +137 -0
- gaia/eval/webapp/node_modules/fresh/package.json +46 -0
- gaia/eval/webapp/node_modules/fs/README.md +9 -0
- gaia/eval/webapp/node_modules/fs/package.json +20 -0
- gaia/eval/webapp/node_modules/function-bind/.eslintrc +21 -0
- gaia/eval/webapp/node_modules/function-bind/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/function-bind/.github/SECURITY.md +3 -0
- gaia/eval/webapp/node_modules/function-bind/.nycrc +13 -0
- gaia/eval/webapp/node_modules/function-bind/CHANGELOG.md +136 -0
- gaia/eval/webapp/node_modules/function-bind/LICENSE +20 -0
- gaia/eval/webapp/node_modules/function-bind/README.md +46 -0
- gaia/eval/webapp/node_modules/function-bind/implementation.js +84 -0
- gaia/eval/webapp/node_modules/function-bind/index.js +5 -0
- gaia/eval/webapp/node_modules/function-bind/package.json +87 -0
- gaia/eval/webapp/node_modules/function-bind/test/.eslintrc +9 -0
- gaia/eval/webapp/node_modules/function-bind/test/index.js +252 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.eslintrc +42 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/get-intrinsic/.nycrc +9 -0
- gaia/eval/webapp/node_modules/get-intrinsic/CHANGELOG.md +186 -0
- gaia/eval/webapp/node_modules/get-intrinsic/LICENSE +21 -0
- gaia/eval/webapp/node_modules/get-intrinsic/README.md +71 -0
- gaia/eval/webapp/node_modules/get-intrinsic/index.js +378 -0
- gaia/eval/webapp/node_modules/get-intrinsic/package.json +97 -0
- gaia/eval/webapp/node_modules/get-intrinsic/test/GetIntrinsic.js +274 -0
- gaia/eval/webapp/node_modules/get-proto/.eslintrc +10 -0
- gaia/eval/webapp/node_modules/get-proto/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/get-proto/.nycrc +9 -0
- gaia/eval/webapp/node_modules/get-proto/CHANGELOG.md +21 -0
- gaia/eval/webapp/node_modules/get-proto/LICENSE +21 -0
- gaia/eval/webapp/node_modules/get-proto/Object.getPrototypeOf.d.ts +5 -0
- gaia/eval/webapp/node_modules/get-proto/Object.getPrototypeOf.js +6 -0
- gaia/eval/webapp/node_modules/get-proto/README.md +50 -0
- gaia/eval/webapp/node_modules/get-proto/Reflect.getPrototypeOf.d.ts +3 -0
- gaia/eval/webapp/node_modules/get-proto/Reflect.getPrototypeOf.js +4 -0
- gaia/eval/webapp/node_modules/get-proto/index.d.ts +5 -0
- gaia/eval/webapp/node_modules/get-proto/index.js +27 -0
- gaia/eval/webapp/node_modules/get-proto/package.json +81 -0
- gaia/eval/webapp/node_modules/get-proto/test/index.js +68 -0
- gaia/eval/webapp/node_modules/get-proto/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/gopd/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/gopd/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/gopd/CHANGELOG.md +45 -0
- gaia/eval/webapp/node_modules/gopd/LICENSE +21 -0
- gaia/eval/webapp/node_modules/gopd/README.md +40 -0
- gaia/eval/webapp/node_modules/gopd/gOPD.d.ts +1 -0
- gaia/eval/webapp/node_modules/gopd/gOPD.js +4 -0
- gaia/eval/webapp/node_modules/gopd/index.d.ts +5 -0
- gaia/eval/webapp/node_modules/gopd/index.js +15 -0
- gaia/eval/webapp/node_modules/gopd/package.json +77 -0
- gaia/eval/webapp/node_modules/gopd/test/index.js +36 -0
- gaia/eval/webapp/node_modules/gopd/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/has-symbols/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/has-symbols/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/has-symbols/.nycrc +9 -0
- gaia/eval/webapp/node_modules/has-symbols/CHANGELOG.md +91 -0
- gaia/eval/webapp/node_modules/has-symbols/LICENSE +21 -0
- gaia/eval/webapp/node_modules/has-symbols/README.md +46 -0
- gaia/eval/webapp/node_modules/has-symbols/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/has-symbols/index.js +14 -0
- gaia/eval/webapp/node_modules/has-symbols/package.json +111 -0
- gaia/eval/webapp/node_modules/has-symbols/shams.d.ts +3 -0
- gaia/eval/webapp/node_modules/has-symbols/shams.js +45 -0
- gaia/eval/webapp/node_modules/has-symbols/test/index.js +22 -0
- gaia/eval/webapp/node_modules/has-symbols/test/shams/core-js.js +29 -0
- gaia/eval/webapp/node_modules/has-symbols/test/shams/get-own-property-symbols.js +29 -0
- gaia/eval/webapp/node_modules/has-symbols/test/tests.js +58 -0
- gaia/eval/webapp/node_modules/has-symbols/tsconfig.json +10 -0
- gaia/eval/webapp/node_modules/hasown/.eslintrc +5 -0
- gaia/eval/webapp/node_modules/hasown/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/hasown/.nycrc +13 -0
- gaia/eval/webapp/node_modules/hasown/CHANGELOG.md +40 -0
- gaia/eval/webapp/node_modules/hasown/LICENSE +21 -0
- gaia/eval/webapp/node_modules/hasown/README.md +40 -0
- gaia/eval/webapp/node_modules/hasown/index.d.ts +3 -0
- gaia/eval/webapp/node_modules/hasown/index.js +8 -0
- gaia/eval/webapp/node_modules/hasown/package.json +92 -0
- gaia/eval/webapp/node_modules/hasown/tsconfig.json +6 -0
- gaia/eval/webapp/node_modules/http-errors/HISTORY.md +180 -0
- gaia/eval/webapp/node_modules/http-errors/LICENSE +23 -0
- gaia/eval/webapp/node_modules/http-errors/README.md +169 -0
- gaia/eval/webapp/node_modules/http-errors/index.js +289 -0
- gaia/eval/webapp/node_modules/http-errors/package.json +50 -0
- gaia/eval/webapp/node_modules/iconv-lite/Changelog.md +162 -0
- gaia/eval/webapp/node_modules/iconv-lite/LICENSE +21 -0
- gaia/eval/webapp/node_modules/iconv-lite/README.md +156 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/dbcs-codec.js +555 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/dbcs-data.js +176 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/index.js +22 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/internal.js +188 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-codec.js +72 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-data-generated.js +451 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/sbcs-data.js +174 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/big5-added.json +122 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp936.json +264 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp949.json +273 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/cp950.json +177 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/eucjp.json +182 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/gb18030-ranges.json +1 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/gbk-added.json +55 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/tables/shiftjis.json +125 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/utf16.js +177 -0
- gaia/eval/webapp/node_modules/iconv-lite/encodings/utf7.js +290 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/bom-handling.js +52 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/extend-node.js +217 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/index.d.ts +24 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/index.js +153 -0
- gaia/eval/webapp/node_modules/iconv-lite/lib/streams.js +121 -0
- gaia/eval/webapp/node_modules/iconv-lite/package.json +46 -0
- gaia/eval/webapp/node_modules/inherits/LICENSE +16 -0
- gaia/eval/webapp/node_modules/inherits/README.md +42 -0
- gaia/eval/webapp/node_modules/inherits/inherits.js +9 -0
- gaia/eval/webapp/node_modules/inherits/inherits_browser.js +27 -0
- gaia/eval/webapp/node_modules/inherits/package.json +29 -0
- gaia/eval/webapp/node_modules/ipaddr.js/LICENSE +19 -0
- gaia/eval/webapp/node_modules/ipaddr.js/README.md +233 -0
- gaia/eval/webapp/node_modules/ipaddr.js/ipaddr.min.js +1 -0
- gaia/eval/webapp/node_modules/ipaddr.js/lib/ipaddr.js +673 -0
- gaia/eval/webapp/node_modules/ipaddr.js/lib/ipaddr.js.d.ts +68 -0
- gaia/eval/webapp/node_modules/ipaddr.js/package.json +35 -0
- gaia/eval/webapp/node_modules/math-intrinsics/.eslintrc +16 -0
- gaia/eval/webapp/node_modules/math-intrinsics/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/math-intrinsics/CHANGELOG.md +24 -0
- gaia/eval/webapp/node_modules/math-intrinsics/LICENSE +21 -0
- gaia/eval/webapp/node_modules/math-intrinsics/README.md +50 -0
- gaia/eval/webapp/node_modules/math-intrinsics/abs.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/abs.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxArrayLength.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxArrayLength.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxSafeInteger.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxSafeInteger.js +5 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxValue.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/constants/maxValue.js +5 -0
- gaia/eval/webapp/node_modules/math-intrinsics/floor.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/floor.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isFinite.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isFinite.js +12 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isInteger.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isInteger.js +16 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNaN.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNaN.js +6 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNegativeZero.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/isNegativeZero.js +6 -0
- gaia/eval/webapp/node_modules/math-intrinsics/max.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/max.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/min.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/min.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/mod.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/mod.js +9 -0
- gaia/eval/webapp/node_modules/math-intrinsics/package.json +86 -0
- gaia/eval/webapp/node_modules/math-intrinsics/pow.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/pow.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/round.d.ts +1 -0
- gaia/eval/webapp/node_modules/math-intrinsics/round.js +4 -0
- gaia/eval/webapp/node_modules/math-intrinsics/sign.d.ts +3 -0
- gaia/eval/webapp/node_modules/math-intrinsics/sign.js +11 -0
- gaia/eval/webapp/node_modules/math-intrinsics/test/index.js +192 -0
- gaia/eval/webapp/node_modules/math-intrinsics/tsconfig.json +3 -0
- gaia/eval/webapp/node_modules/media-typer/HISTORY.md +22 -0
- gaia/eval/webapp/node_modules/media-typer/LICENSE +22 -0
- gaia/eval/webapp/node_modules/media-typer/README.md +81 -0
- gaia/eval/webapp/node_modules/media-typer/index.js +270 -0
- gaia/eval/webapp/node_modules/media-typer/package.json +26 -0
- gaia/eval/webapp/node_modules/merge-descriptors/HISTORY.md +21 -0
- gaia/eval/webapp/node_modules/merge-descriptors/LICENSE +23 -0
- gaia/eval/webapp/node_modules/merge-descriptors/README.md +49 -0
- gaia/eval/webapp/node_modules/merge-descriptors/index.js +60 -0
- gaia/eval/webapp/node_modules/merge-descriptors/package.json +39 -0
- gaia/eval/webapp/node_modules/methods/HISTORY.md +29 -0
- gaia/eval/webapp/node_modules/methods/LICENSE +24 -0
- gaia/eval/webapp/node_modules/methods/README.md +51 -0
- gaia/eval/webapp/node_modules/methods/index.js +69 -0
- gaia/eval/webapp/node_modules/methods/package.json +36 -0
- gaia/eval/webapp/node_modules/mime/.npmignore +0 -0
- gaia/eval/webapp/node_modules/mime/CHANGELOG.md +164 -0
- gaia/eval/webapp/node_modules/mime/LICENSE +21 -0
- gaia/eval/webapp/node_modules/mime/README.md +90 -0
- gaia/eval/webapp/node_modules/mime/cli.js +8 -0
- gaia/eval/webapp/node_modules/mime/mime.js +108 -0
- gaia/eval/webapp/node_modules/mime/package.json +44 -0
- gaia/eval/webapp/node_modules/mime/src/build.js +53 -0
- gaia/eval/webapp/node_modules/mime/src/test.js +60 -0
- gaia/eval/webapp/node_modules/mime/types.json +1 -0
- gaia/eval/webapp/node_modules/mime-db/HISTORY.md +507 -0
- gaia/eval/webapp/node_modules/mime-db/LICENSE +23 -0
- gaia/eval/webapp/node_modules/mime-db/README.md +100 -0
- gaia/eval/webapp/node_modules/mime-db/db.json +8519 -0
- gaia/eval/webapp/node_modules/mime-db/index.js +12 -0
- gaia/eval/webapp/node_modules/mime-db/package.json +60 -0
- gaia/eval/webapp/node_modules/mime-types/HISTORY.md +397 -0
- gaia/eval/webapp/node_modules/mime-types/LICENSE +23 -0
- gaia/eval/webapp/node_modules/mime-types/README.md +113 -0
- gaia/eval/webapp/node_modules/mime-types/index.js +188 -0
- gaia/eval/webapp/node_modules/mime-types/package.json +44 -0
- gaia/eval/webapp/node_modules/ms/index.js +152 -0
- gaia/eval/webapp/node_modules/ms/license.md +21 -0
- gaia/eval/webapp/node_modules/ms/package.json +37 -0
- gaia/eval/webapp/node_modules/ms/readme.md +51 -0
- gaia/eval/webapp/node_modules/negotiator/HISTORY.md +108 -0
- gaia/eval/webapp/node_modules/negotiator/LICENSE +24 -0
- gaia/eval/webapp/node_modules/negotiator/README.md +203 -0
- gaia/eval/webapp/node_modules/negotiator/index.js +82 -0
- gaia/eval/webapp/node_modules/negotiator/lib/charset.js +169 -0
- gaia/eval/webapp/node_modules/negotiator/lib/encoding.js +184 -0
- gaia/eval/webapp/node_modules/negotiator/lib/language.js +179 -0
- gaia/eval/webapp/node_modules/negotiator/lib/mediaType.js +294 -0
- gaia/eval/webapp/node_modules/negotiator/package.json +42 -0
- gaia/eval/webapp/node_modules/object-inspect/.eslintrc +53 -0
- gaia/eval/webapp/node_modules/object-inspect/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/object-inspect/.nycrc +13 -0
- gaia/eval/webapp/node_modules/object-inspect/CHANGELOG.md +424 -0
- gaia/eval/webapp/node_modules/object-inspect/LICENSE +21 -0
- gaia/eval/webapp/node_modules/object-inspect/example/all.js +23 -0
- gaia/eval/webapp/node_modules/object-inspect/example/circular.js +6 -0
- gaia/eval/webapp/node_modules/object-inspect/example/fn.js +5 -0
- gaia/eval/webapp/node_modules/object-inspect/example/inspect.js +10 -0
- gaia/eval/webapp/node_modules/object-inspect/index.js +544 -0
- gaia/eval/webapp/node_modules/object-inspect/package-support.json +20 -0
- gaia/eval/webapp/node_modules/object-inspect/package.json +105 -0
- gaia/eval/webapp/node_modules/object-inspect/readme.markdown +84 -0
- gaia/eval/webapp/node_modules/object-inspect/test/bigint.js +58 -0
- gaia/eval/webapp/node_modules/object-inspect/test/browser/dom.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/circular.js +16 -0
- gaia/eval/webapp/node_modules/object-inspect/test/deep.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/element.js +53 -0
- gaia/eval/webapp/node_modules/object-inspect/test/err.js +48 -0
- gaia/eval/webapp/node_modules/object-inspect/test/fakes.js +29 -0
- gaia/eval/webapp/node_modules/object-inspect/test/fn.js +76 -0
- gaia/eval/webapp/node_modules/object-inspect/test/global.js +17 -0
- gaia/eval/webapp/node_modules/object-inspect/test/has.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/holes.js +15 -0
- gaia/eval/webapp/node_modules/object-inspect/test/indent-option.js +271 -0
- gaia/eval/webapp/node_modules/object-inspect/test/inspect.js +139 -0
- gaia/eval/webapp/node_modules/object-inspect/test/lowbyte.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/number.js +58 -0
- gaia/eval/webapp/node_modules/object-inspect/test/quoteStyle.js +26 -0
- gaia/eval/webapp/node_modules/object-inspect/test/toStringTag.js +40 -0
- gaia/eval/webapp/node_modules/object-inspect/test/undef.js +12 -0
- gaia/eval/webapp/node_modules/object-inspect/test/values.js +261 -0
- gaia/eval/webapp/node_modules/object-inspect/test-core-js.js +26 -0
- gaia/eval/webapp/node_modules/object-inspect/util.inspect.js +1 -0
- gaia/eval/webapp/node_modules/on-finished/HISTORY.md +98 -0
- gaia/eval/webapp/node_modules/on-finished/LICENSE +23 -0
- gaia/eval/webapp/node_modules/on-finished/README.md +162 -0
- gaia/eval/webapp/node_modules/on-finished/index.js +234 -0
- gaia/eval/webapp/node_modules/on-finished/package.json +39 -0
- gaia/eval/webapp/node_modules/parseurl/HISTORY.md +58 -0
- gaia/eval/webapp/node_modules/parseurl/LICENSE +24 -0
- gaia/eval/webapp/node_modules/parseurl/README.md +133 -0
- gaia/eval/webapp/node_modules/parseurl/index.js +158 -0
- gaia/eval/webapp/node_modules/parseurl/package.json +40 -0
- gaia/eval/webapp/node_modules/path/.npmignore +1 -0
- gaia/eval/webapp/node_modules/path/LICENSE +18 -0
- gaia/eval/webapp/node_modules/path/README.md +15 -0
- gaia/eval/webapp/node_modules/path/package.json +24 -0
- gaia/eval/webapp/node_modules/path/path.js +628 -0
- gaia/eval/webapp/node_modules/path-to-regexp/LICENSE +21 -0
- gaia/eval/webapp/node_modules/path-to-regexp/Readme.md +35 -0
- gaia/eval/webapp/node_modules/path-to-regexp/index.js +156 -0
- gaia/eval/webapp/node_modules/path-to-regexp/package.json +30 -0
- gaia/eval/webapp/node_modules/process/.eslintrc +21 -0
- gaia/eval/webapp/node_modules/process/LICENSE +22 -0
- gaia/eval/webapp/node_modules/process/README.md +26 -0
- gaia/eval/webapp/node_modules/process/browser.js +184 -0
- gaia/eval/webapp/node_modules/process/index.js +2 -0
- gaia/eval/webapp/node_modules/process/package.json +27 -0
- gaia/eval/webapp/node_modules/process/test.js +199 -0
- gaia/eval/webapp/node_modules/proxy-addr/HISTORY.md +161 -0
- gaia/eval/webapp/node_modules/proxy-addr/LICENSE +22 -0
- gaia/eval/webapp/node_modules/proxy-addr/README.md +139 -0
- gaia/eval/webapp/node_modules/proxy-addr/index.js +327 -0
- gaia/eval/webapp/node_modules/proxy-addr/package.json +47 -0
- gaia/eval/webapp/node_modules/qs/.editorconfig +46 -0
- gaia/eval/webapp/node_modules/qs/.eslintrc +38 -0
- gaia/eval/webapp/node_modules/qs/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/qs/.nycrc +13 -0
- gaia/eval/webapp/node_modules/qs/CHANGELOG.md +600 -0
- gaia/eval/webapp/node_modules/qs/LICENSE.md +29 -0
- gaia/eval/webapp/node_modules/qs/README.md +709 -0
- gaia/eval/webapp/node_modules/qs/dist/qs.js +90 -0
- gaia/eval/webapp/node_modules/qs/lib/formats.js +23 -0
- gaia/eval/webapp/node_modules/qs/lib/index.js +11 -0
- gaia/eval/webapp/node_modules/qs/lib/parse.js +296 -0
- gaia/eval/webapp/node_modules/qs/lib/stringify.js +351 -0
- gaia/eval/webapp/node_modules/qs/lib/utils.js +265 -0
- gaia/eval/webapp/node_modules/qs/package.json +91 -0
- gaia/eval/webapp/node_modules/qs/test/empty-keys-cases.js +267 -0
- gaia/eval/webapp/node_modules/qs/test/parse.js +1170 -0
- gaia/eval/webapp/node_modules/qs/test/stringify.js +1298 -0
- gaia/eval/webapp/node_modules/qs/test/utils.js +136 -0
- gaia/eval/webapp/node_modules/range-parser/HISTORY.md +56 -0
- gaia/eval/webapp/node_modules/range-parser/LICENSE +23 -0
- gaia/eval/webapp/node_modules/range-parser/README.md +84 -0
- gaia/eval/webapp/node_modules/range-parser/index.js +162 -0
- gaia/eval/webapp/node_modules/range-parser/package.json +44 -0
- gaia/eval/webapp/node_modules/raw-body/HISTORY.md +308 -0
- gaia/eval/webapp/node_modules/raw-body/LICENSE +22 -0
- gaia/eval/webapp/node_modules/raw-body/README.md +223 -0
- gaia/eval/webapp/node_modules/raw-body/SECURITY.md +24 -0
- gaia/eval/webapp/node_modules/raw-body/index.d.ts +87 -0
- gaia/eval/webapp/node_modules/raw-body/index.js +336 -0
- gaia/eval/webapp/node_modules/raw-body/package.json +49 -0
- gaia/eval/webapp/node_modules/safe-buffer/LICENSE +21 -0
- gaia/eval/webapp/node_modules/safe-buffer/README.md +584 -0
- gaia/eval/webapp/node_modules/safe-buffer/index.d.ts +187 -0
- gaia/eval/webapp/node_modules/safe-buffer/index.js +65 -0
- gaia/eval/webapp/node_modules/safe-buffer/package.json +51 -0
- gaia/eval/webapp/node_modules/safer-buffer/LICENSE +21 -0
- gaia/eval/webapp/node_modules/safer-buffer/Porting-Buffer.md +268 -0
- gaia/eval/webapp/node_modules/safer-buffer/Readme.md +156 -0
- gaia/eval/webapp/node_modules/safer-buffer/dangerous.js +58 -0
- gaia/eval/webapp/node_modules/safer-buffer/package.json +34 -0
- gaia/eval/webapp/node_modules/safer-buffer/safer.js +77 -0
- gaia/eval/webapp/node_modules/safer-buffer/tests.js +406 -0
- gaia/eval/webapp/node_modules/send/HISTORY.md +526 -0
- gaia/eval/webapp/node_modules/send/LICENSE +23 -0
- gaia/eval/webapp/node_modules/send/README.md +327 -0
- gaia/eval/webapp/node_modules/send/SECURITY.md +24 -0
- gaia/eval/webapp/node_modules/send/index.js +1142 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/HISTORY.md +14 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/LICENSE +22 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/README.md +128 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/index.js +60 -0
- gaia/eval/webapp/node_modules/send/node_modules/encodeurl/package.json +40 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/index.js +162 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/license.md +21 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/package.json +38 -0
- gaia/eval/webapp/node_modules/send/node_modules/ms/readme.md +59 -0
- gaia/eval/webapp/node_modules/send/package.json +62 -0
- gaia/eval/webapp/node_modules/serve-static/HISTORY.md +487 -0
- gaia/eval/webapp/node_modules/serve-static/LICENSE +25 -0
- gaia/eval/webapp/node_modules/serve-static/README.md +257 -0
- gaia/eval/webapp/node_modules/serve-static/index.js +209 -0
- gaia/eval/webapp/node_modules/serve-static/package.json +42 -0
- gaia/eval/webapp/node_modules/setprototypeof/LICENSE +13 -0
- gaia/eval/webapp/node_modules/setprototypeof/README.md +31 -0
- gaia/eval/webapp/node_modules/setprototypeof/index.d.ts +2 -0
- gaia/eval/webapp/node_modules/setprototypeof/index.js +17 -0
- gaia/eval/webapp/node_modules/setprototypeof/package.json +38 -0
- gaia/eval/webapp/node_modules/setprototypeof/test/index.js +24 -0
- gaia/eval/webapp/node_modules/side-channel/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel/.eslintrc +12 -0
- gaia/eval/webapp/node_modules/side-channel/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel/CHANGELOG.md +110 -0
- gaia/eval/webapp/node_modules/side-channel/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel/README.md +61 -0
- gaia/eval/webapp/node_modules/side-channel/index.d.ts +14 -0
- gaia/eval/webapp/node_modules/side-channel/index.js +43 -0
- gaia/eval/webapp/node_modules/side-channel/package.json +85 -0
- gaia/eval/webapp/node_modules/side-channel/test/index.js +104 -0
- gaia/eval/webapp/node_modules/side-channel/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-list/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-list/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/side-channel-list/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-list/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-list/CHANGELOG.md +15 -0
- gaia/eval/webapp/node_modules/side-channel-list/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-list/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-list/index.d.ts +13 -0
- gaia/eval/webapp/node_modules/side-channel-list/index.js +113 -0
- gaia/eval/webapp/node_modules/side-channel-list/list.d.ts +14 -0
- gaia/eval/webapp/node_modules/side-channel-list/package.json +77 -0
- gaia/eval/webapp/node_modules/side-channel-list/test/index.js +104 -0
- gaia/eval/webapp/node_modules/side-channel-list/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-map/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-map/.eslintrc +11 -0
- gaia/eval/webapp/node_modules/side-channel-map/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-map/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-map/CHANGELOG.md +22 -0
- gaia/eval/webapp/node_modules/side-channel-map/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-map/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-map/index.d.ts +15 -0
- gaia/eval/webapp/node_modules/side-channel-map/index.js +68 -0
- gaia/eval/webapp/node_modules/side-channel-map/package.json +80 -0
- gaia/eval/webapp/node_modules/side-channel-map/test/index.js +114 -0
- gaia/eval/webapp/node_modules/side-channel-map/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.editorconfig +9 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.eslintrc +12 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.github/FUNDING.yml +12 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/.nycrc +13 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/CHANGELOG.md +28 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/LICENSE +21 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/README.md +62 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/index.d.ts +15 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/index.js +84 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/package.json +87 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/test/index.js +114 -0
- gaia/eval/webapp/node_modules/side-channel-weakmap/tsconfig.json +9 -0
- gaia/eval/webapp/node_modules/statuses/HISTORY.md +82 -0
- gaia/eval/webapp/node_modules/statuses/LICENSE +23 -0
- gaia/eval/webapp/node_modules/statuses/README.md +136 -0
- gaia/eval/webapp/node_modules/statuses/codes.json +65 -0
- gaia/eval/webapp/node_modules/statuses/index.js +146 -0
- gaia/eval/webapp/node_modules/statuses/package.json +49 -0
- gaia/eval/webapp/node_modules/toidentifier/HISTORY.md +9 -0
- gaia/eval/webapp/node_modules/toidentifier/LICENSE +21 -0
- gaia/eval/webapp/node_modules/toidentifier/README.md +61 -0
- gaia/eval/webapp/node_modules/toidentifier/index.js +32 -0
- gaia/eval/webapp/node_modules/toidentifier/package.json +38 -0
- gaia/eval/webapp/node_modules/type-is/HISTORY.md +259 -0
- gaia/eval/webapp/node_modules/type-is/LICENSE +23 -0
- gaia/eval/webapp/node_modules/type-is/README.md +170 -0
- gaia/eval/webapp/node_modules/type-is/index.js +266 -0
- gaia/eval/webapp/node_modules/type-is/package.json +45 -0
- gaia/eval/webapp/node_modules/unpipe/HISTORY.md +4 -0
- gaia/eval/webapp/node_modules/unpipe/LICENSE +22 -0
- gaia/eval/webapp/node_modules/unpipe/README.md +43 -0
- gaia/eval/webapp/node_modules/unpipe/index.js +69 -0
- gaia/eval/webapp/node_modules/unpipe/package.json +27 -0
- gaia/eval/webapp/node_modules/util/LICENSE +18 -0
- gaia/eval/webapp/node_modules/util/README.md +15 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/LICENSE +16 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/README.md +42 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/inherits.js +7 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/inherits_browser.js +23 -0
- gaia/eval/webapp/node_modules/util/node_modules/inherits/package.json +29 -0
- gaia/eval/webapp/node_modules/util/package.json +35 -0
- gaia/eval/webapp/node_modules/util/support/isBuffer.js +3 -0
- gaia/eval/webapp/node_modules/util/support/isBufferBrowser.js +6 -0
- gaia/eval/webapp/node_modules/util/util.js +586 -0
- gaia/eval/webapp/node_modules/utils-merge/.npmignore +9 -0
- gaia/eval/webapp/node_modules/utils-merge/LICENSE +20 -0
- gaia/eval/webapp/node_modules/utils-merge/README.md +34 -0
- gaia/eval/webapp/node_modules/utils-merge/index.js +23 -0
- gaia/eval/webapp/node_modules/utils-merge/package.json +40 -0
- gaia/eval/webapp/node_modules/vary/HISTORY.md +39 -0
- gaia/eval/webapp/node_modules/vary/LICENSE +22 -0
- gaia/eval/webapp/node_modules/vary/README.md +101 -0
- gaia/eval/webapp/node_modules/vary/index.js +149 -0
- gaia/eval/webapp/node_modules/vary/package.json +43 -0
- gaia/eval/webapp/package-lock.json +875 -0
- gaia/eval/webapp/package.json +21 -0
- gaia/eval/webapp/public/app.js +3403 -0
- gaia/eval/webapp/public/index.html +88 -0
- gaia/eval/webapp/public/styles.css +3661 -0
- gaia/eval/webapp/server.js +416 -0
- gaia/eval/webapp/test-setup.js +73 -0
- gaia/llm/__init__.py +2 -0
- gaia/llm/lemonade_client.py +3083 -0
- gaia/llm/lemonade_manager.py +269 -0
- gaia/llm/llm_client.py +729 -0
- gaia/llm/vlm_client.py +307 -0
- gaia/logger.py +189 -0
- gaia/mcp/agent_mcp_server.py +245 -0
- gaia/mcp/blender_mcp_client.py +138 -0
- gaia/mcp/blender_mcp_server.py +648 -0
- gaia/mcp/context7_cache.py +332 -0
- gaia/mcp/external_services.py +518 -0
- gaia/mcp/mcp_bridge.py +550 -0
- gaia/mcp/servers/__init__.py +6 -0
- gaia/mcp/servers/docker_mcp.py +83 -0
- gaia/rag/__init__.py +10 -0
- gaia/rag/app.py +293 -0
- gaia/rag/demo.py +304 -0
- gaia/rag/pdf_utils.py +235 -0
- gaia/rag/sdk.py +2194 -0
- gaia/security.py +163 -0
- gaia/talk/app.py +289 -0
- gaia/talk/sdk.py +538 -0
- gaia/util.py +46 -0
- gaia/version.py +100 -0
gaia/chat/sdk.py
ADDED
|
@@ -0,0 +1,1200 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Copyright(C) 2024-2025 Advanced Micro Devices, Inc. All rights reserved.
|
|
3
|
+
# SPDX-License-Identifier: MIT
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
Gaia Chat SDK - Unified text chat integration with conversation history
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
from collections import deque
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
from gaia.chat.prompts import Prompts
|
|
16
|
+
from gaia.llm.lemonade_client import DEFAULT_MODEL_NAME
|
|
17
|
+
from gaia.llm.llm_client import LLMClient
|
|
18
|
+
from gaia.logger import get_logger
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
|
|
22
|
+
class ChatConfig:
|
|
23
|
+
"""Configuration for ChatSDK."""
|
|
24
|
+
|
|
25
|
+
model: str = DEFAULT_MODEL_NAME
|
|
26
|
+
max_tokens: int = 512
|
|
27
|
+
system_prompt: Optional[str] = None
|
|
28
|
+
max_history_length: int = 4 # Number of conversation pairs to keep
|
|
29
|
+
show_stats: bool = False
|
|
30
|
+
logging_level: str = "INFO"
|
|
31
|
+
use_claude: bool = False # Use Claude API
|
|
32
|
+
use_chatgpt: bool = False # Use ChatGPT/OpenAI API
|
|
33
|
+
use_local_llm: bool = (
|
|
34
|
+
True # Use local LLM (computed as not use_claude and not use_chatgpt if not explicitly set)
|
|
35
|
+
)
|
|
36
|
+
claude_model: str = "claude-sonnet-4-20250514" # Claude model when use_claude=True
|
|
37
|
+
base_url: str = "http://localhost:8000/api/v1" # Lemonade server base URL
|
|
38
|
+
assistant_name: str = "gaia" # Name to use for the assistant in conversations
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@dataclass
|
|
42
|
+
class ChatResponse:
|
|
43
|
+
"""Response from chat operations."""
|
|
44
|
+
|
|
45
|
+
text: str
|
|
46
|
+
history: Optional[List[str]] = None
|
|
47
|
+
stats: Optional[Dict[str, Any]] = None
|
|
48
|
+
is_complete: bool = True
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class ChatSDK:
|
|
52
|
+
"""
|
|
53
|
+
Gaia Chat SDK - Unified text chat integration with conversation history.
|
|
54
|
+
|
|
55
|
+
This SDK provides a simple interface for integrating Gaia's text chat
|
|
56
|
+
capabilities with conversation memory into applications.
|
|
57
|
+
|
|
58
|
+
Example usage:
|
|
59
|
+
```python
|
|
60
|
+
from gaia.chat.sdk import ChatSDK, ChatConfig
|
|
61
|
+
|
|
62
|
+
# Create SDK instance
|
|
63
|
+
config = ChatConfig(model=DEFAULT_MODEL_NAME, show_stats=True)
|
|
64
|
+
chat = ChatSDK(config)
|
|
65
|
+
|
|
66
|
+
# Single message
|
|
67
|
+
response = await chat.send("Hello, how are you?")
|
|
68
|
+
print(response.text)
|
|
69
|
+
|
|
70
|
+
# Streaming chat
|
|
71
|
+
async for chunk in chat.send_stream("Tell me a story"):
|
|
72
|
+
print(chunk.text, end="", flush=True)
|
|
73
|
+
|
|
74
|
+
# Get conversation history
|
|
75
|
+
history = chat.get_history()
|
|
76
|
+
```
|
|
77
|
+
"""
|
|
78
|
+
|
|
79
|
+
def __init__(self, config: Optional[ChatConfig] = None):
|
|
80
|
+
"""
|
|
81
|
+
Initialize the ChatSDK.
|
|
82
|
+
|
|
83
|
+
Args:
|
|
84
|
+
config: Configuration options. If None, uses defaults.
|
|
85
|
+
"""
|
|
86
|
+
self.config = config or ChatConfig()
|
|
87
|
+
self.log = get_logger(__name__)
|
|
88
|
+
self.log.setLevel(getattr(logging, self.config.logging_level))
|
|
89
|
+
|
|
90
|
+
# Validate that both providers aren't specified
|
|
91
|
+
if self.config.use_claude and self.config.use_chatgpt:
|
|
92
|
+
raise ValueError(
|
|
93
|
+
"Cannot specify both use_claude and use_chatgpt. Please choose one."
|
|
94
|
+
)
|
|
95
|
+
|
|
96
|
+
# Initialize LLM client - it will compute use_local automatically
|
|
97
|
+
self.llm_client = LLMClient(
|
|
98
|
+
use_claude=self.config.use_claude,
|
|
99
|
+
use_openai=self.config.use_chatgpt,
|
|
100
|
+
claude_model=self.config.claude_model,
|
|
101
|
+
base_url=self.config.base_url,
|
|
102
|
+
system_prompt=None, # We handle system prompts through Prompts class
|
|
103
|
+
)
|
|
104
|
+
|
|
105
|
+
# Store conversation history
|
|
106
|
+
self.chat_history = deque(maxlen=self.config.max_history_length * 2)
|
|
107
|
+
|
|
108
|
+
# RAG support
|
|
109
|
+
self.rag = None
|
|
110
|
+
self.rag_enabled = False
|
|
111
|
+
|
|
112
|
+
self.log.debug("ChatSDK initialized")
|
|
113
|
+
|
|
114
|
+
def _format_history_for_context(self) -> str:
|
|
115
|
+
"""Format chat history for inclusion in LLM context using model-specific formatting."""
|
|
116
|
+
history_list = list(self.chat_history)
|
|
117
|
+
return Prompts.format_chat_history(
|
|
118
|
+
self.config.model,
|
|
119
|
+
history_list,
|
|
120
|
+
self.config.assistant_name,
|
|
121
|
+
self.config.system_prompt,
|
|
122
|
+
)
|
|
123
|
+
|
|
124
|
+
def _normalize_message_content(self, content: Any) -> str:
|
|
125
|
+
"""
|
|
126
|
+
Convert message content into a string for prompt construction, handling structured payloads.
|
|
127
|
+
"""
|
|
128
|
+
if isinstance(content, str):
|
|
129
|
+
return content
|
|
130
|
+
if isinstance(content, list):
|
|
131
|
+
parts = []
|
|
132
|
+
for entry in content:
|
|
133
|
+
if isinstance(entry, dict):
|
|
134
|
+
if entry.get("type") == "text":
|
|
135
|
+
parts.append(entry.get("text", ""))
|
|
136
|
+
else:
|
|
137
|
+
parts.append(json.dumps(entry))
|
|
138
|
+
else:
|
|
139
|
+
parts.append(str(entry))
|
|
140
|
+
return "\n".join(part for part in parts if part)
|
|
141
|
+
if isinstance(content, dict):
|
|
142
|
+
return json.dumps(content)
|
|
143
|
+
return str(content)
|
|
144
|
+
|
|
145
|
+
def send_messages(
|
|
146
|
+
self,
|
|
147
|
+
messages: List[Dict[str, Any]],
|
|
148
|
+
system_prompt: Optional[str] = None,
|
|
149
|
+
**kwargs,
|
|
150
|
+
) -> ChatResponse:
|
|
151
|
+
"""
|
|
152
|
+
Send a full conversation history and get a response.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
messages: List of message dicts with 'role' and 'content' keys
|
|
156
|
+
system_prompt: Optional system prompt to use (overrides config)
|
|
157
|
+
**kwargs: Additional arguments for LLM generation
|
|
158
|
+
|
|
159
|
+
Returns:
|
|
160
|
+
ChatResponse with the complete response
|
|
161
|
+
"""
|
|
162
|
+
try:
|
|
163
|
+
# Convert messages to chat history format
|
|
164
|
+
chat_history = []
|
|
165
|
+
|
|
166
|
+
for msg in messages:
|
|
167
|
+
role = msg.get("role", "")
|
|
168
|
+
content = self._normalize_message_content(msg.get("content", ""))
|
|
169
|
+
|
|
170
|
+
if role == "user":
|
|
171
|
+
chat_history.append(f"user: {content}")
|
|
172
|
+
elif role == "assistant":
|
|
173
|
+
chat_history.append(f"assistant: {content}")
|
|
174
|
+
elif role == "tool":
|
|
175
|
+
tool_name = msg.get("name", "tool")
|
|
176
|
+
chat_history.append(f"assistant: [tool:{tool_name}] {content}")
|
|
177
|
+
# Skip system messages since they're passed separately
|
|
178
|
+
|
|
179
|
+
# Use provided system prompt or fall back to config default
|
|
180
|
+
effective_system_prompt = system_prompt or self.config.system_prompt
|
|
181
|
+
|
|
182
|
+
# Format according to model type
|
|
183
|
+
formatted_prompt = Prompts.format_chat_history(
|
|
184
|
+
model=self.config.model,
|
|
185
|
+
chat_history=chat_history,
|
|
186
|
+
assistant_name="assistant",
|
|
187
|
+
system_prompt=effective_system_prompt,
|
|
188
|
+
)
|
|
189
|
+
|
|
190
|
+
# Debug logging
|
|
191
|
+
self.log.debug(f"Formatted prompt length: {len(formatted_prompt)} chars")
|
|
192
|
+
self.log.debug(
|
|
193
|
+
f"System prompt used: {effective_system_prompt[:100] if effective_system_prompt else 'None'}..."
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
# Set appropriate stop tokens based on model
|
|
197
|
+
model_lower = self.config.model.lower() if self.config.model else ""
|
|
198
|
+
if "qwen" in model_lower:
|
|
199
|
+
kwargs.setdefault("stop", ["<|im_end|>", "<|im_start|>"])
|
|
200
|
+
elif "llama" in model_lower:
|
|
201
|
+
kwargs.setdefault("stop", ["<|eot_id|>", "<|start_header_id|>"])
|
|
202
|
+
|
|
203
|
+
# Use generate with formatted prompt
|
|
204
|
+
response = self.llm_client.generate(
|
|
205
|
+
prompt=formatted_prompt,
|
|
206
|
+
model=self.config.model,
|
|
207
|
+
stream=False,
|
|
208
|
+
**kwargs,
|
|
209
|
+
)
|
|
210
|
+
|
|
211
|
+
# Prepare response data
|
|
212
|
+
stats = None
|
|
213
|
+
if self.config.show_stats:
|
|
214
|
+
stats = self.get_stats()
|
|
215
|
+
|
|
216
|
+
return ChatResponse(text=response, stats=stats, is_complete=True)
|
|
217
|
+
|
|
218
|
+
except ConnectionError as e:
|
|
219
|
+
# Re-raise connection errors with additional context
|
|
220
|
+
self.log.error(f"LLM connection error in send_messages: {e}")
|
|
221
|
+
raise ConnectionError(f"Failed to connect to LLM server: {e}") from e
|
|
222
|
+
except Exception as e:
|
|
223
|
+
self.log.error(f"Error in send_messages: {e}")
|
|
224
|
+
raise
|
|
225
|
+
|
    def send_messages_stream(
        self,
        messages: List[Dict[str, Any]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ):
        """
        Send a full conversation history and get a streaming response.

        Args:
            messages: List of message dicts with 'role' and 'content' keys
            system_prompt: Optional system prompt to use (overrides config)
            **kwargs: Additional arguments for LLM generation

        Yields:
            ChatResponse chunks as they arrive
        """
        try:
            # Convert messages to chat history format
            chat_history = []

            for msg in messages:
                role = msg.get("role", "")
                content = self._normalize_message_content(msg.get("content", ""))

                if role == "user":
                    chat_history.append(f"user: {content}")
                elif role == "assistant":
                    chat_history.append(f"assistant: {content}")
                elif role == "tool":
                    tool_name = msg.get("name", "tool")
                    chat_history.append(f"assistant: [tool:{tool_name}] {content}")
                # Skip system messages since they're passed separately

            # Use provided system prompt or fall back to config default
            effective_system_prompt = system_prompt or self.config.system_prompt

            # Format according to model type
            formatted_prompt = Prompts.format_chat_history(
                model=self.config.model,
                chat_history=chat_history,
                assistant_name="assistant",
                system_prompt=effective_system_prompt,
            )

            # Debug logging
            self.log.debug(f"Formatted prompt length: {len(formatted_prompt)} chars")
            self.log.debug(
                f"System prompt used: {effective_system_prompt[:100] if effective_system_prompt else 'None'}..."
            )

            # Set appropriate stop tokens based on model
            model_lower = self.config.model.lower() if self.config.model else ""
            if "qwen" in model_lower:
                kwargs.setdefault("stop", ["<|im_end|>", "<|im_start|>"])
            elif "llama" in model_lower:
                kwargs.setdefault("stop", ["<|eot_id|>", "<|start_header_id|>"])

            # Use generate with formatted prompt for streaming
            full_response = ""
            for chunk in self.llm_client.generate(
                prompt=formatted_prompt, model=self.config.model, stream=True, **kwargs
            ):
                full_response += chunk
                yield ChatResponse(text=chunk, is_complete=False)

            # Send final response with stats
            # Always get stats for token tracking (show_stats controls display, not collection)
            stats = self.get_stats()

            yield ChatResponse(text="", stats=stats, is_complete=True)

        except ConnectionError as e:
            # Re-raise connection errors with additional context
            self.log.error(f"LLM connection error in send_messages_stream: {e}")
            raise ConnectionError(
                f"Failed to connect to LLM server (streaming): {e}"
            ) from e
        except Exception as e:
            self.log.error(f"Error in send_messages_stream: {e}")
            raise

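A sketch of consuming the streaming variant; the final chunk carries stats rather than text, so the loop separates the two. Same assumptions as the earlier example (import path, reachable backend).

```python
# Illustrative only: consume send_messages_stream(), printing tokens as they arrive.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig())
messages = [{"role": "user", "content": "Stream a haiku about GPUs."}]

for chunk in sdk.send_messages_stream(messages):
    if not chunk.is_complete:
        print(chunk.text, end="", flush=True)   # incremental text
    elif chunk.stats:
        print(f"\n{chunk.stats}")               # final chunk: stats only
```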
    def send(self, message: str, *, no_history: bool = False, **kwargs) -> ChatResponse:
        """
        Send a message and get a complete response with conversation history.

        Args:
            message: The message to send
            no_history: When True, bypass stored chat history and send only this prompt
            **kwargs: Additional arguments for LLM generation

        Returns:
            ChatResponse with the complete response and updated history
        """
        try:
            if not message.strip():
                raise ValueError("Message cannot be empty")

            # Enhance message with RAG context if enabled
            enhanced_message, _rag_metadata = self._enhance_with_rag(message.strip())

            if no_history:
                # Build a prompt using only the current enhanced message
                full_prompt = Prompts.format_chat_history(
                    model=self.config.model,
                    chat_history=[f"user: {enhanced_message}"],
                    assistant_name=self.config.assistant_name,
                    system_prompt=self.config.system_prompt,
                )
            else:
                # Add user message to history (use original message for history)
                self.chat_history.append(f"user: {message.strip()}")

                # Prepare prompt with conversation context (use enhanced message for LLM)
                # Temporarily replace the last message with enhanced version for formatting
                if self.rag_enabled and enhanced_message != message.strip():
                    # Save original and replace with enhanced version
                    original_last = self.chat_history.pop()
                    self.chat_history.append(f"user: {enhanced_message}")
                    full_prompt = self._format_history_for_context()
                    # Restore original for history
                    self.chat_history.pop()
                    self.chat_history.append(original_last)
                else:
                    full_prompt = self._format_history_for_context()

            # Generate response
            generate_kwargs = dict(kwargs)
            if "max_tokens" not in generate_kwargs:
                generate_kwargs["max_tokens"] = self.config.max_tokens

            # Note: Retry logic is now handled at the LLM client level
            response = self.llm_client.generate(
                full_prompt,
                model=self.config.model,
                **generate_kwargs,
            )

            # Add assistant message to history when tracking conversation
            if not no_history:
                self.chat_history.append(f"{self.config.assistant_name}: {response}")

            # Prepare response data
            stats = None
            if self.config.show_stats:
                stats = self.get_stats()

            history = (
                list(self.chat_history)
                if kwargs.get("include_history", False)
                else None
            )

            return ChatResponse(
                text=response, history=history, stats=stats, is_complete=True
            )

        except Exception as e:
            self.log.error(f"Error in send: {e}")
            raise

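A short sketch contrasting the stateful `send` with the `no_history` bypass, under the same assumed `gaia.chat.sdk` exports as above.

```python
# Illustrative only: send() keeps a rolling history; no_history=True sends a one-off prompt.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig(assistant_name="gaia"))

print(sdk.send("My name is John.").text)        # recorded in chat_history
print(sdk.send("What is my name?").text)        # answered with history as context
print(sdk.send("Ping?", no_history=True).text)  # not added to chat_history
print(sdk.history_length)                       # only the first two exchanges are counted
```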
    def send_stream(self, message: str, **kwargs):
        """
        Send a message and get a streaming response with conversation history.

        Args:
            message: The message to send
            **kwargs: Additional arguments for LLM generation

        Yields:
            ChatResponse chunks as they arrive
        """
        try:
            if not message.strip():
                raise ValueError("Message cannot be empty")

            # Enhance message with RAG context if enabled
            enhanced_message, _rag_metadata = self._enhance_with_rag(message.strip())

            # Add user message to history (use original message for history)
            self.chat_history.append(f"user: {message.strip()}")

            # Prepare prompt with conversation context (use enhanced message for LLM)
            # Temporarily replace the last message with enhanced version for formatting
            if self.rag_enabled and enhanced_message != message.strip():
                # Save original and replace with enhanced version
                original_last = self.chat_history.pop()
                self.chat_history.append(f"user: {enhanced_message}")
                full_prompt = self._format_history_for_context()
                # Restore original for history
                self.chat_history.pop()
                self.chat_history.append(original_last)
            else:
                full_prompt = self._format_history_for_context()

            # Generate streaming response
            generate_kwargs = dict(kwargs)
            if "max_tokens" not in generate_kwargs:
                generate_kwargs["max_tokens"] = self.config.max_tokens

            full_response = ""
            for chunk in self.llm_client.generate(
                full_prompt, model=self.config.model, stream=True, **generate_kwargs
            ):
                full_response += chunk
                yield ChatResponse(text=chunk, is_complete=False)

            # Add complete assistant message to history
            self.chat_history.append(f"{self.config.assistant_name}: {full_response}")

            # Send final response with stats and history if requested
            stats = None
            if self.config.show_stats:
                stats = self.get_stats()

            history = (
                list(self.chat_history)
                if kwargs.get("include_history", False)
                else None
            )

            yield ChatResponse(text="", history=history, stats=stats, is_complete=True)

        except Exception as e:
            self.log.error(f"Error in send_stream: {e}")
            raise

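The single-message streaming path mirrors `send_messages_stream`; a small sketch of collecting the full reply while echoing chunks (same assumptions as the earlier examples).

```python
# Illustrative only: stream a reply and reassemble it client-side.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig())
pieces = []
for chunk in sdk.send_stream("Summarize what a deque is in one sentence."):
    if not chunk.is_complete:
        pieces.append(chunk.text)
        print(chunk.text, end="", flush=True)
print()
full_reply = "".join(pieces)  # same text that was appended to sdk.chat_history
```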
    def get_history(self) -> List[str]:
        """
        Get the current conversation history.

        Returns:
            List of conversation entries in "role: message" format
        """
        return list(self.chat_history)

    def clear_history(self) -> None:
        """Clear the conversation history."""
        self.chat_history.clear()
        self.log.debug("Chat history cleared")

    def get_formatted_history(self) -> List[Dict[str, str]]:
        """
        Get conversation history in structured format.

        Returns:
            List of dictionaries with 'role' and 'message' keys
        """
        formatted = []
        assistant_prefix = f"{self.config.assistant_name}: "

        for entry in self.chat_history:
            if entry.startswith("user: "):
                role, message = "user", entry[6:]
                formatted.append({"role": role, "message": message})
            elif entry.startswith(assistant_prefix):
                role, message = (
                    self.config.assistant_name,
                    entry[len(assistant_prefix) :],
                )
                formatted.append({"role": role, "message": message})
            elif ": " in entry:
                # Fallback for any other format
                role, message = entry.split(": ", 1)
                formatted.append({"role": role, "message": message})
            else:
                formatted.append({"role": "unknown", "message": entry})
        return formatted

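The two history accessors return the raw "role: message" strings and a parsed dict form respectively; a sketch of the expected shapes (exact values depend on the configured assistant_name and the model's reply).

```python
# Illustrative only: shapes returned by the history accessors.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig(assistant_name="gaia"))
sdk.send("My name is John.")

raw = sdk.get_history()
# e.g. ["user: My name is John.", "gaia: <model reply>"]

structured = sdk.get_formatted_history()
# e.g. [{"role": "user", "message": "My name is John."},
#       {"role": "gaia", "message": "<model reply>"}]

sdk.clear_history()
assert sdk.get_history() == []
```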
    def get_stats(self) -> Dict[str, Any]:
        """
        Get performance statistics.

        Returns:
            Dictionary of performance stats
        """
        try:
            return self.llm_client.get_performance_stats() or {}
        except Exception as e:
            self.log.warning(f"Failed to get stats: {e}")
            return {}

    def get_system_prompt(self) -> Optional[str]:
        """
        Get the current system prompt.

        Returns:
            Current system prompt or None if not set
        """
        return self.config.system_prompt

    def set_system_prompt(self, system_prompt: Optional[str]) -> None:
        """
        Set the system prompt for future conversations.

        Args:
            system_prompt: New system prompt to use, or None to clear it
        """
        self.config.system_prompt = system_prompt
        self.log.debug(
            f"System prompt updated: {system_prompt[:100] if system_prompt else 'None'}..."
        )

    def display_stats(self, stats: Optional[Dict[str, Any]] = None) -> None:
        """
        Display performance statistics in a formatted way.

        Args:
            stats: Optional stats dictionary. If None, gets current stats.
        """
        if stats is None:
            stats = self.get_stats()

        if stats:
            print("\n" + "=" * 30)
            print("Performance Statistics:")
            print("=" * 30)
            for key, value in stats.items():
                if isinstance(value, float):
                    if "time" in key.lower():
                        print(f" {key}: {value:.3f}s")
                    elif "tokens_per_second" in key.lower():
                        print(f" {key}: {value:.2f} tokens/s")
                    else:
                        print(f" {key}: {value:.4f}")
                elif isinstance(value, int):
                    if "tokens" in key.lower():
                        print(f" {key}: {value:,} tokens")
                    else:
                        print(f" {key}: {value}")
                else:
                    print(f" {key}: {value}")
            print("=" * 30)
        else:
            print("No statistics available.")

    async def start_interactive_session(self) -> None:
        """
        Start an interactive chat session with conversation history.

        This provides a full CLI-style interactive experience with commands
        for managing conversation history and viewing statistics.
        """
        print("=" * 50)
        print("Interactive Chat Session Started")
        print(f"Using model: {self.config.model}")
        print("Type 'quit', 'exit', or 'bye' to end the conversation")
        print("Commands:")
        print(" /clear - clear conversation history")
        print(" /history - show conversation history")
        print(" /stats - show performance statistics")
        print(" /help - show this help message")
        print("=" * 50)

        while True:
            try:
                user_input = input("\nYou: ").strip()

                if user_input.lower() in ["quit", "exit", "bye"]:
                    print("\nGoodbye!")
                    break
                elif user_input.lower() == "/clear":
                    self.clear_history()
                    print("Conversation history cleared.")
                    continue
                elif user_input.lower() == "/history":
                    history = self.get_formatted_history()
                    if not history:
                        print("No conversation history.")
                    else:
                        print("\n" + "=" * 30)
                        print("Conversation History:")
                        print("=" * 30)
                        for entry in history:
                            print(f"{entry['role'].title()}: {entry['message']}")
                        print("=" * 30)
                    continue
                elif user_input.lower() == "/stats":
                    self.display_stats()
                    continue
                elif user_input.lower() == "/help":
                    print("\n" + "=" * 40)
                    print("Available Commands:")
                    print("=" * 40)
                    print(" /clear - clear conversation history")
                    print(" /history - show conversation history")
                    print(" /stats - show performance statistics")
                    print(" /help - show this help message")
                    print("\nTo exit: type 'quit', 'exit', or 'bye'")
                    print("=" * 40)
                    continue
                elif not user_input:
                    print("Please enter a message.")
                    continue

                print(f"\n{self.config.assistant_name.title()}: ", end="", flush=True)

                # Generate and stream response
                for chunk in self.send_stream(user_input):
                    if not chunk.is_complete:
                        print(chunk.text, end="", flush=True)
                    else:
                        # Show stats if configured and available
                        if self.config.show_stats and chunk.stats:
                            self.display_stats(chunk.stats)
                print()  # Add newline after response

            except KeyboardInterrupt:
                print("\n\nChat interrupted. Goodbye!")
                break
            except Exception as e:
                print(f"\nError: {e}")
                raise

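`start_interactive_session` is declared `async` even though it only performs blocking console I/O, so callers need an event loop; a minimal sketch with the same assumed imports as above.

```python
# Illustrative only: drive the CLI-style interactive session.
import asyncio

from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig(show_stats=True))
asyncio.run(sdk.start_interactive_session())
```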
    def update_config(self, **kwargs) -> None:
        """
        Update configuration dynamically.

        Args:
            **kwargs: Configuration parameters to update
        """
        # Update our config
        for key, value in kwargs.items():
            if hasattr(self.config, key):
                setattr(self.config, key, value)

        # Handle special cases
        if "max_history_length" in kwargs:
            # Create new deque with updated maxlen
            old_history = list(self.chat_history)
            new_maxlen = kwargs["max_history_length"] * 2
            self.chat_history = deque(old_history, maxlen=new_maxlen)

        if "system_prompt" in kwargs:
            # System prompt is handled through Prompts class, not directly
            pass

        if "assistant_name" in kwargs:
            # Assistant name change affects history display but not underlying storage
            # since we dynamically parse the history based on current assistant_name
            pass

    @property
    def history_length(self) -> int:
        """Get the current number of conversation entries."""
        return len(self.chat_history)

    @property
    def conversation_pairs(self) -> int:
        """Get the number of conversation pairs (user + assistant)."""
        return len(self.chat_history) // 2

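`max_history_length` is interpreted as conversation pairs, so the backing deque's `maxlen` is set to twice that value (one user plus one assistant entry per pair). A sketch of the effect, under the same assumed imports:

```python
# Illustrative only: resizing history keeps only the most recent entries.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig())
sdk.update_config(max_history_length=3)  # deque maxlen becomes 6 entries
print(sdk.chat_history.maxlen)           # -> 6
print(sdk.conversation_pairs)            # pairs currently stored (entries // 2)
```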
    def enable_rag(self, documents: Optional[List[str]] = None, **rag_kwargs):
        """
        Enable RAG (Retrieval-Augmented Generation) for document-based chat.

        Args:
            documents: List of PDF file paths to index
            **rag_kwargs: Additional RAG configuration options
        """
        try:
            from gaia.rag.sdk import RAGSDK, RAGConfig
        except ImportError:
            raise ImportError(
                'RAG dependencies not installed. Install with: pip install ".[rag]"'
            )

        # Create RAG config matching chat config
        rag_config = RAGConfig(
            model=self.config.model,
            show_stats=self.config.show_stats,
            use_local_llm=self.config.use_local_llm,
            **rag_kwargs,
        )

        self.rag = RAGSDK(rag_config)
        self.rag_enabled = True

        # Index documents if provided
        if documents:
            for doc_path in documents:
                self.log.info(f"Indexing document: {doc_path}")
                result = self.rag.index_document(doc_path)

                if result:
                    self.log.info(f"Successfully indexed: {doc_path}")
                else:
                    self.log.warning(f"Failed to index document: {doc_path}")

        self.log.info(
            f"RAG enabled with {len(documents) if documents else 0} documents"
        )

    def disable_rag(self):
        """Disable RAG functionality."""
        self.rag = None
        self.rag_enabled = False
        self.log.info("RAG disabled")

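A sketch of turning RAG on for a conversation; it assumes the optional `gaia` RAG extra is installed and that the PDF paths exist (the paths here are placeholders).

```python
# Illustrative only: enable document-grounded answers, then ask against the index.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig())
sdk.enable_rag(documents=["./docs/manual.pdf"])  # hypothetical path
sdk.add_document("./docs/release_notes.pdf")     # hypothetical path

answer = sdk.send("What does the manual say about driver requirements?")
print(answer.text)

sdk.disable_rag()  # subsequent sends skip retrieval entirely
```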
    def add_document(self, document_path: str) -> bool:
        """
        Add a document to the RAG index.

        Args:
            document_path: Path to PDF file to index

        Returns:
            True if indexing succeeded
        """
        if not self.rag_enabled or not self.rag:
            raise ValueError("RAG not enabled. Call enable_rag() first.")

        return self.rag.index_document(document_path)

    def _estimate_tokens(self, text: str) -> int:
        """
        Estimate the number of tokens in text.
        Uses rough approximation of 4 characters per token.

        Args:
            text: The text to estimate tokens for

        Returns:
            Estimated token count
        """
        # Rough approximation: ~4 characters per token for English text
        # This is conservative to avoid overrunning context
        return len(text) // 4

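The 4-characters-per-token heuristic is the basis for all the budgeting below; a quick worked example of what it yields:

```python
# Illustrative only: the character-based token estimate used for budgeting.
text = "Retrieval-Augmented Generation grounds answers in indexed documents."
estimated = len(text) // 4  # 68 characters -> 17 "tokens"
print(len(text), estimated)
```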
    def summarize_conversation_history(self, max_history_tokens: int) -> Optional[str]:
        """
        Summarize conversation history when it exceeds the token budget.

        Args:
            max_history_tokens: Maximum allowed tokens for stored history

        Returns:
            The generated summary (when summarization occurred) or None
        """
        if max_history_tokens <= 0:
            raise ValueError("max_history_tokens must be positive")

        history_entries = list(self.chat_history)
        if not history_entries:
            return None

        history_text = "\n".join(history_entries)
        history_tokens = self._estimate_tokens(history_text)

        if history_tokens <= max_history_tokens:
            print(
                "History tokens are less than max history tokens, so no summarization is needed"
            )
            return None

        print(
            "History tokens are greater than max history tokens, so summarization is needed"
        )

        self.log.info(
            "Conversation history (~%d tokens) exceeds budget (%d). Summarizing...",
            history_tokens,
            max_history_tokens,
        )

        summary_prompt = (
            "Summarize the following conversation between a user and the GAIA web "
            "development agent. Preserve:\n"
            "- The app requirements and inferred schema/data models\n"
            "- Key implementation details already completed\n"
            "- Outstanding issues, validation failures, or TODOs (quote error/warning text verbatim)\n"
            "- Any constraints or preferences the user emphasized\n\n"
            "Write the summary in under 400 tokens, using concise paragraphs, and include the exact text of any warnings/errors so future fixes have full context."
        )
        full_prompt = (
            f"{summary_prompt}\n\nConversation History:\n{history_text}\n\nSummary:"
        )

        try:
            summary = self.llm_client.generate(
                full_prompt,
                model=self.config.model,
                max_tokens=min(self.config.max_tokens, 2048),
                timeout=1200,
            )
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.log.error("Failed to summarize conversation history: %s", exc)
            return None

        summary = summary.strip()
        if not summary:
            self.log.warning("Summarization returned empty content; keeping history.")
            return None

        self.chat_history.clear()
        self.chat_history.append(
            f"{self.config.assistant_name}: Conversation summary so far:\n{summary}"
        )
        return summary

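A sketch of when a caller might invoke the summarizer: after a long exchange, the entire history collapses into a single assistant entry containing the summary (same assumed imports, backend required).

```python
# Illustrative only: compact a long conversation into one summary entry.
from gaia.chat.sdk import ChatSDK, ChatConfig  # assumed export path

sdk = ChatSDK(ChatConfig(max_history_length=50))
for i in range(20):
    sdk.send(f"Requirement {i}: the app must support feature {i}.")

summary = sdk.summarize_conversation_history(max_history_tokens=500)
if summary is not None:
    # History now holds one "<assistant_name>: Conversation summary so far: ..." entry.
    print(sdk.history_length, summary[:120])
```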
    def _truncate_rag_context(self, context: str, max_tokens: int) -> str:
        """
        Truncate RAG context to fit within token budget.

        Args:
            context: The RAG context to truncate
            max_tokens: Maximum tokens allowed

        Returns:
            Truncated context with ellipsis if needed
        """
        estimated_tokens = self._estimate_tokens(context)

        if estimated_tokens <= max_tokens:
            return context

        # Calculate how many characters we can keep
        target_chars = max_tokens * 4  # Using same 4:1 ratio

        # Truncate and add ellipsis
        truncated = context[: target_chars - 20]  # Leave room for ellipsis
        truncated += "\n... [context truncated for length]"

        self.log.warning(
            f"RAG context truncated from ~{estimated_tokens} to ~{max_tokens} tokens"
        )
        return truncated

    def _enhance_with_rag(self, message: str) -> tuple:
        """
        Enhance user message with relevant document context using RAG.

        Args:
            message: Original user message

        Returns:
            Tuple of (enhanced_message, metadata_dict)
        """
        if not self.rag_enabled or not self.rag:
            return message, None

        try:
            # Query RAG for relevant context with metadata
            rag_response = self.rag.query(message, include_metadata=True)

            if rag_response.chunks:
                # Build context with source information
                context_parts = []
                if rag_response.chunk_metadata:
                    for i, (chunk, metadata) in enumerate(
                        zip(rag_response.chunks, rag_response.chunk_metadata)
                    ):
                        context_parts.append(
                            f"Context {i+1} (from {metadata['source_file']}, relevance: {metadata['relevance_score']:.2f}):\n{chunk}"
                        )
                else:
                    context_parts = [
                        f"Context {i+1}:\n{chunk}"
                        for i, chunk in enumerate(rag_response.chunks)
                    ]

                context = "\n\n".join(context_parts)

                # Check token limits
                message_tokens = self._estimate_tokens(message)
                template_tokens = 150  # Template text overhead
                response_tokens = self.config.max_tokens
                history_tokens = self._estimate_tokens(str(self.chat_history))

                # Conservative context size for models
                model_context_size = 32768
                available_for_rag = (
                    model_context_size
                    - message_tokens
                    - template_tokens
                    - response_tokens
                    - history_tokens
                )

                # Ensure minimum RAG context
                if available_for_rag < 500:
                    self.log.warning(
                        f"Limited space for RAG context: {available_for_rag} tokens"
                    )
                    available_for_rag = 500

                # Truncate context if needed
                context = self._truncate_rag_context(context, available_for_rag)

                # Build enhanced message
                enhanced_message = f"""Based on the provided documents, please answer the following question. Use the context below to inform your response.

Context from documents:
{context}

User question: {message}

Note: When citing information, please mention which context number it came from."""

                # Prepare metadata for return
                metadata = {
                    "rag_used": True,
                    "chunks_retrieved": len(rag_response.chunks),
                    "estimated_context_tokens": self._estimate_tokens(context),
                    "available_tokens": available_for_rag,
                    "context_truncated": (
                        len(context) < sum(len(c) for c in rag_response.chunks)
                        if rag_response.chunks
                        else False
                    ),
                }

                # Add query metadata if available
                if rag_response.query_metadata:
                    metadata["query_metadata"] = rag_response.query_metadata

                self.log.debug(
                    f"Enhanced message with {len(rag_response.chunks)} chunks from "
                    f"{len(set(rag_response.source_files)) if rag_response.source_files else 0} documents, "
                    f"~{metadata['estimated_context_tokens']} context tokens"
                )
                return enhanced_message, metadata
            else:
                self.log.debug("No relevant document context found")
                return message, {"rag_used": True, "chunks_retrieved": 0}

        except Exception as e:
            self.log.warning(
                f"RAG enhancement failed: {e}, falling back to direct query"
            )
            return message, {"rag_used": False, "error": str(e)}


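The retrieval budget in `_enhance_with_rag` is simple arithmetic against an assumed 32k context window; a worked example with placeholder counts (the 512-token reply reservation and 1000-token history are just example figures):

```python
# Illustrative only: how the RAG token budget is derived (all counts are estimates).
model_context_size = 32768
message_tokens = 25      # ~100-character question at 4 chars/token
template_tokens = 150    # fixed overhead of the wrapper template
response_tokens = 512    # tokens reserved for the reply (config.max_tokens)
history_tokens = 1000    # estimated from str(chat_history)

available_for_rag = (
    model_context_size
    - message_tokens
    - template_tokens
    - response_tokens
    - history_tokens
)
print(available_for_rag)  # 31081 tokens left for retrieved context
```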
class SimpleChat:
    """
    Ultra-simple interface for quick chat integration.

    Example usage:
    ```python
    from gaia.chat.sdk import SimpleChat

    chat = SimpleChat()

    # Simple question-answer
    response = chat.ask("What's the weather like?")
    print(response)

    # Chat with memory
    response1 = chat.ask("My name is John")
    response2 = chat.ask("What's my name?")  # Remembers previous context
    ```
    """

    def __init__(
        self,
        system_prompt: Optional[str] = None,
        model: Optional[str] = None,
        assistant_name: Optional[str] = None,
    ):
        """
        Initialize SimpleChat with minimal configuration.

        Args:
            system_prompt: Optional system prompt for the AI
            model: Model to use (defaults to DEFAULT_MODEL_NAME)
            assistant_name: Name to use for the assistant (defaults to "gaia")
        """
        config = ChatConfig(
            model=model or DEFAULT_MODEL_NAME,
            system_prompt=system_prompt,
            assistant_name=assistant_name or "gaia",
            show_stats=False,
            logging_level="WARNING",  # Minimal logging
        )
        self._sdk = ChatSDK(config)

    def ask(self, question: str) -> str:
        """
        Ask a question and get a text response with conversation memory.

        Args:
            question: The question to ask

        Returns:
            The AI's response as a string
        """
        response = self._sdk.send(question)
        return response.text

    def ask_stream(self, question: str):
        """
        Ask a question and get a streaming response with conversation memory.

        Args:
            question: The question to ask

        Yields:
            Response chunks as they arrive
        """
        for chunk in self._sdk.send_stream(question):
            if not chunk.is_complete:
                yield chunk.text

    def clear_memory(self) -> None:
        """Clear the conversation memory."""
        self._sdk.clear_history()

    def get_conversation(self) -> List[Dict[str, str]]:
        """Get the conversation history in a readable format."""
        return self._sdk.get_formatted_history()


class ChatSession:
    """
    Session-based chat interface for managing multiple separate conversations.

    Example usage:
    ```python
    from gaia.chat.sdk import ChatSession

    # Create session manager
    sessions = ChatSession()

    # Create different chat sessions
    work_chat = sessions.create_session("work", system_prompt="You are a professional assistant")
    personal_chat = sessions.create_session("personal", system_prompt="You are a friendly companion")

    # Chat in different contexts
    work_response = work_chat.send("Draft an email to my team").text
    personal_response = personal_chat.send("What's a good recipe for dinner?").text
    ```
    """

    def __init__(self, default_config: Optional[ChatConfig] = None):
        """Initialize the session manager."""
        self.default_config = default_config or ChatConfig()
        self.sessions: Dict[str, ChatSDK] = {}
        self.log = get_logger(__name__)

    def create_session(
        self, session_id: str, config: Optional[ChatConfig] = None, **config_kwargs
    ) -> ChatSDK:
        """
        Create a new chat session.

        Args:
            session_id: Unique identifier for the session
            config: Optional configuration (uses default if not provided)
            **config_kwargs: Configuration parameters to override

        Returns:
            ChatSDK instance for the session
        """
        if config is None:
            # Create config from defaults with overrides
            config = ChatConfig(
                model=config_kwargs.get("model", self.default_config.model),
                max_tokens=config_kwargs.get(
                    "max_tokens", self.default_config.max_tokens
                ),
                system_prompt=config_kwargs.get(
                    "system_prompt", self.default_config.system_prompt
                ),
                max_history_length=config_kwargs.get(
                    "max_history_length", self.default_config.max_history_length
                ),
                show_stats=config_kwargs.get(
                    "show_stats", self.default_config.show_stats
                ),
                logging_level=config_kwargs.get(
                    "logging_level", self.default_config.logging_level
                ),
                use_claude=config_kwargs.get(
                    "use_claude", self.default_config.use_claude
                ),
                use_chatgpt=config_kwargs.get(
                    "use_chatgpt", self.default_config.use_chatgpt
                ),
                assistant_name=config_kwargs.get(
                    "assistant_name", self.default_config.assistant_name
                ),
            )

        session = ChatSDK(config)
        self.sessions[session_id] = session
        self.log.debug(f"Created chat session: {session_id}")
        return session

    def get_session(self, session_id: str) -> Optional[ChatSDK]:
        """Get an existing session by ID."""
        return self.sessions.get(session_id)

    def delete_session(self, session_id: str) -> bool:
        """Delete a session."""
        if session_id in self.sessions:
            del self.sessions[session_id]
            self.log.debug(f"Deleted chat session: {session_id}")
            return True
        return False

    def list_sessions(self) -> List[str]:
        """List all active session IDs."""
        return list(self.sessions.keys())

    def clear_all_sessions(self) -> None:
        """Clear all sessions."""
        self.sessions.clear()
        self.log.debug("Cleared all chat sessions")


# Convenience functions for one-off usage
def quick_chat(
    message: str,
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    assistant_name: Optional[str] = None,
) -> str:
    """
    Quick one-off text chat without conversation memory.

    Args:
        message: Message to send
        system_prompt: Optional system prompt
        model: Optional model to use
        assistant_name: Name to use for the assistant

    Returns:
        AI response
    """
    config = ChatConfig(
        model=model or DEFAULT_MODEL_NAME,
        system_prompt=system_prompt,
        assistant_name=assistant_name or "gaia",
        show_stats=False,
        logging_level="WARNING",
        max_history_length=2,  # Small history for quick chat
    )
    sdk = ChatSDK(config)
    response = sdk.send(message)
    return response.text


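A one-line sketch of the convenience wrapper, assuming it is exported from the same `gaia.chat.sdk` module and a backend is running:

```python
# Illustrative only: fire-and-forget question with no retained memory.
from gaia.chat.sdk import quick_chat  # assumed export path

print(quick_chat("Name three uses of an NPU.", system_prompt="Be brief."))
```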
def quick_chat_with_memory(
    messages: List[str],
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    assistant_name: Optional[str] = None,
) -> List[str]:
    """
    Quick multi-turn chat with conversation memory.

    Args:
        messages: List of messages to send sequentially
        system_prompt: Optional system prompt
        model: Optional model to use
        assistant_name: Name to use for the assistant

    Returns:
        List of AI responses
    """
    config = ChatConfig(
        model=model or DEFAULT_MODEL_NAME,
        system_prompt=system_prompt,
        assistant_name=assistant_name or "gaia",
        show_stats=False,
        logging_level="WARNING",
    )
    sdk = ChatSDK(config)

    responses = []
    for message in messages:
        response = sdk.send(message)
        responses.append(response.text)

    return responses
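And a sketch of the multi-turn helper, where each message sees the replies to the earlier ones (same assumed export path and backend):

```python
# Illustrative only: sequential turns sharing one in-memory history.
from gaia.chat.sdk import quick_chat_with_memory  # assumed export path

replies = quick_chat_with_memory(
    [
        "My favorite color is teal.",
        "What did I just say my favorite color was?",
    ]
)
for reply in replies:
    print(reply)
```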