docagent-cli 0.0.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docagent_cli/__init__.py +36 -0
- docagent_cli/__main__.py +6 -0
- docagent_cli/_ask_user_types.py +90 -0
- docagent_cli/_cli_context.py +27 -0
- docagent_cli/_debug.py +52 -0
- docagent_cli/_env_vars.py +56 -0
- docagent_cli/_server_config.py +352 -0
- docagent_cli/_session_stats.py +114 -0
- docagent_cli/_testing_models.py +144 -0
- docagent_cli/_version.py +17 -0
- docagent_cli/agent.py +1193 -0
- docagent_cli/app.py +4979 -0
- docagent_cli/app.tcss +283 -0
- docagent_cli/ask_user.py +301 -0
- docagent_cli/built_in_skills/__init__.py +5 -0
- docagent_cli/built_in_skills/doc-coauthoring/SKILL.md +375 -0
- docagent_cli/built_in_skills/docx/LICENSE.txt +30 -0
- docagent_cli/built_in_skills/docx/SKILL.md +590 -0
- docagent_cli/built_in_skills/docx/scripts/__init__.py +1 -0
- docagent_cli/built_in_skills/docx/scripts/accept_changes.py +135 -0
- docagent_cli/built_in_skills/docx/scripts/comment.py +318 -0
- docagent_cli/built_in_skills/docx/scripts/office/helpers/__init__.py +0 -0
- docagent_cli/built_in_skills/docx/scripts/office/helpers/merge_runs.py +199 -0
- docagent_cli/built_in_skills/docx/scripts/office/helpers/simplify_redlines.py +197 -0
- docagent_cli/built_in_skills/docx/scripts/office/pack.py +159 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd +1499 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd +146 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd +1085 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd +11 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd +3081 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd +23 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd +185 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd +287 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd +1676 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd +28 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd +144 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd +174 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd +25 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd +18 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd +59 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd +56 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd +195 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd +582 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd +25 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd +4439 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd +570 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd +509 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd +12 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd +108 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd +96 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd +3646 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd +116 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd +42 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd +50 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd +49 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd +33 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/mce/mc.xsd +75 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-2010.xsd +560 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-2012.xsd +67 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-2018.xsd +14 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-cex-2018.xsd +20 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-cid-2016.xsd +13 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd +4 -0
- docagent_cli/built_in_skills/docx/scripts/office/schemas/microsoft/wml-symex-2015.xsd +8 -0
- docagent_cli/built_in_skills/docx/scripts/office/soffice.py +183 -0
- docagent_cli/built_in_skills/docx/scripts/office/unpack.py +132 -0
- docagent_cli/built_in_skills/docx/scripts/office/validate.py +111 -0
- docagent_cli/built_in_skills/docx/scripts/office/validators/__init__.py +15 -0
- docagent_cli/built_in_skills/docx/scripts/office/validators/base.py +847 -0
- docagent_cli/built_in_skills/docx/scripts/office/validators/docx.py +446 -0
- docagent_cli/built_in_skills/docx/scripts/office/validators/pptx.py +275 -0
- docagent_cli/built_in_skills/docx/scripts/office/validators/redlining.py +247 -0
- docagent_cli/built_in_skills/docx/scripts/templates/comments.xml +3 -0
- docagent_cli/built_in_skills/docx/scripts/templates/commentsExtended.xml +3 -0
- docagent_cli/built_in_skills/docx/scripts/templates/commentsExtensible.xml +3 -0
- docagent_cli/built_in_skills/docx/scripts/templates/commentsIds.xml +3 -0
- docagent_cli/built_in_skills/docx/scripts/templates/people.xml +3 -0
- docagent_cli/built_in_skills/pdf/LICENSE.txt +30 -0
- docagent_cli/built_in_skills/pdf/SKILL.md +314 -0
- docagent_cli/built_in_skills/pdf/forms.md +294 -0
- docagent_cli/built_in_skills/pdf/reference.md +612 -0
- docagent_cli/built_in_skills/pdf/scripts/check_bounding_boxes.py +65 -0
- docagent_cli/built_in_skills/pdf/scripts/check_fillable_fields.py +11 -0
- docagent_cli/built_in_skills/pdf/scripts/convert_pdf_to_images.py +33 -0
- docagent_cli/built_in_skills/pdf/scripts/create_validation_image.py +37 -0
- docagent_cli/built_in_skills/pdf/scripts/extract_form_field_info.py +122 -0
- docagent_cli/built_in_skills/pdf/scripts/extract_form_structure.py +115 -0
- docagent_cli/built_in_skills/pdf/scripts/fill_fillable_fields.py +98 -0
- docagent_cli/built_in_skills/pdf/scripts/fill_pdf_form_with_annotations.py +107 -0
- docagent_cli/built_in_skills/pptx/LICENSE.txt +30 -0
- docagent_cli/built_in_skills/pptx/SKILL.md +232 -0
- docagent_cli/built_in_skills/pptx/editing.md +205 -0
- docagent_cli/built_in_skills/pptx/pptxgenjs.md +420 -0
- docagent_cli/built_in_skills/pptx/scripts/__init__.py +0 -0
- docagent_cli/built_in_skills/pptx/scripts/add_slide.py +195 -0
- docagent_cli/built_in_skills/pptx/scripts/clean.py +286 -0
- docagent_cli/built_in_skills/pptx/scripts/office/helpers/__init__.py +0 -0
- docagent_cli/built_in_skills/pptx/scripts/office/helpers/merge_runs.py +199 -0
- docagent_cli/built_in_skills/pptx/scripts/office/helpers/simplify_redlines.py +197 -0
- docagent_cli/built_in_skills/pptx/scripts/office/pack.py +159 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd +1499 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd +146 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd +1085 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd +11 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd +3081 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd +23 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd +185 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd +287 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd +1676 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd +28 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd +144 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd +174 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd +25 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd +18 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd +59 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd +56 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd +195 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd +582 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd +25 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd +4439 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd +570 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd +509 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd +12 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd +108 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd +96 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd +3646 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd +116 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd +42 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd +50 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd +49 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd +33 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/mce/mc.xsd +75 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-2010.xsd +560 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-2012.xsd +67 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-2018.xsd +14 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-cex-2018.xsd +20 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-cid-2016.xsd +13 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd +4 -0
- docagent_cli/built_in_skills/pptx/scripts/office/schemas/microsoft/wml-symex-2015.xsd +8 -0
- docagent_cli/built_in_skills/pptx/scripts/office/soffice.py +183 -0
- docagent_cli/built_in_skills/pptx/scripts/office/unpack.py +132 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validate.py +111 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validators/__init__.py +15 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validators/base.py +847 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validators/docx.py +446 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validators/pptx.py +275 -0
- docagent_cli/built_in_skills/pptx/scripts/office/validators/redlining.py +247 -0
- docagent_cli/built_in_skills/pptx/scripts/thumbnail.py +289 -0
- docagent_cli/built_in_skills/remember/SKILL.md +118 -0
- docagent_cli/built_in_skills/skill-creator/LICENSE.txt +202 -0
- docagent_cli/built_in_skills/skill-creator/SKILL.md +485 -0
- docagent_cli/built_in_skills/skill-creator/agents/analyzer.md +274 -0
- docagent_cli/built_in_skills/skill-creator/agents/comparator.md +202 -0
- docagent_cli/built_in_skills/skill-creator/agents/grader.md +223 -0
- docagent_cli/built_in_skills/skill-creator/assets/eval_review.html +146 -0
- docagent_cli/built_in_skills/skill-creator/eval-viewer/generate_review.py +471 -0
- docagent_cli/built_in_skills/skill-creator/eval-viewer/viewer.html +1325 -0
- docagent_cli/built_in_skills/skill-creator/references/schemas.md +430 -0
- docagent_cli/built_in_skills/skill-creator/scripts/__init__.py +0 -0
- docagent_cli/built_in_skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
- docagent_cli/built_in_skills/skill-creator/scripts/generate_report.py +326 -0
- docagent_cli/built_in_skills/skill-creator/scripts/improve_description.py +247 -0
- docagent_cli/built_in_skills/skill-creator/scripts/package_skill.py +136 -0
- docagent_cli/built_in_skills/skill-creator/scripts/quick_validate.py +103 -0
- docagent_cli/built_in_skills/skill-creator/scripts/run_eval.py +310 -0
- docagent_cli/built_in_skills/skill-creator/scripts/run_loop.py +328 -0
- docagent_cli/built_in_skills/skill-creator/scripts/utils.py +47 -0
- docagent_cli/built_in_skills/theme-factory/LICENSE.txt +202 -0
- docagent_cli/built_in_skills/theme-factory/SKILL.md +59 -0
- docagent_cli/built_in_skills/theme-factory/theme-showcase.pdf +0 -0
- docagent_cli/built_in_skills/theme-factory/themes/arctic-frost.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/botanical-garden.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/desert-rose.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/forest-canopy.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/golden-hour.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/midnight-galaxy.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/modern-minimalist.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/ocean-depths.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/sunset-boulevard.md +19 -0
- docagent_cli/built_in_skills/theme-factory/themes/tech-innovation.md +19 -0
- docagent_cli/built_in_skills/xlsx/LICENSE.txt +30 -0
- docagent_cli/built_in_skills/xlsx/SKILL.md +292 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/helpers/__init__.py +0 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/helpers/merge_runs.py +199 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/helpers/simplify_redlines.py +197 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/pack.py +159 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd +1499 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd +146 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd +1085 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd +11 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd +3081 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd +23 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd +185 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd +287 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd +1676 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd +28 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd +144 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd +174 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd +25 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd +18 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd +59 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd +56 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd +195 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd +582 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd +25 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd +4439 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd +570 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd +509 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd +12 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd +108 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd +96 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd +3646 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd +116 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd +42 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd +50 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd +49 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd +33 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/mce/mc.xsd +75 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-2010.xsd +560 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-2012.xsd +67 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-2018.xsd +14 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-cex-2018.xsd +20 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-cid-2016.xsd +13 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd +4 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/schemas/microsoft/wml-symex-2015.xsd +8 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/soffice.py +183 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/unpack.py +132 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validate.py +111 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validators/__init__.py +15 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validators/base.py +847 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validators/docx.py +446 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validators/pptx.py +275 -0
- docagent_cli/built_in_skills/xlsx/scripts/office/validators/redlining.py +247 -0
- docagent_cli/built_in_skills/xlsx/scripts/recalc.py +184 -0
- docagent_cli/clipboard.py +128 -0
- docagent_cli/command_registry.py +284 -0
- docagent_cli/config.py +2418 -0
- docagent_cli/configurable_model.py +162 -0
- docagent_cli/default_agent_prompt.md +12 -0
- docagent_cli/editor.py +142 -0
- docagent_cli/file_ops.py +473 -0
- docagent_cli/formatting.py +28 -0
- docagent_cli/hooks.py +206 -0
- docagent_cli/input.py +787 -0
- docagent_cli/integrations/__init__.py +1 -0
- docagent_cli/integrations/sandbox_factory.py +873 -0
- docagent_cli/integrations/sandbox_provider.py +71 -0
- docagent_cli/local_context.py +718 -0
- docagent_cli/main.py +1641 -0
- docagent_cli/mcp_tools.py +707 -0
- docagent_cli/mcp_trust.py +168 -0
- docagent_cli/media_utils.py +478 -0
- docagent_cli/model_config.py +1620 -0
- docagent_cli/non_interactive.py +948 -0
- docagent_cli/offload.py +371 -0
- docagent_cli/output.py +69 -0
- docagent_cli/project_utils.py +188 -0
- docagent_cli/py.typed +0 -0
- docagent_cli/remote_client.py +515 -0
- docagent_cli/server.py +520 -0
- docagent_cli/server_graph.py +196 -0
- docagent_cli/server_manager.py +365 -0
- docagent_cli/sessions.py +1262 -0
- docagent_cli/skills/__init__.py +18 -0
- docagent_cli/skills/commands.py +1090 -0
- docagent_cli/skills/load.py +192 -0
- docagent_cli/subagents.py +173 -0
- docagent_cli/system_prompt.md +247 -0
- docagent_cli/textual_adapter.py +1352 -0
- docagent_cli/theme.py +842 -0
- docagent_cli/token_state.py +31 -0
- docagent_cli/tool_display.py +298 -0
- docagent_cli/tools.py +236 -0
- docagent_cli/ui.py +420 -0
- docagent_cli/unicode_security.py +516 -0
- docagent_cli/update_check.py +454 -0
- docagent_cli/widgets/__init__.py +9 -0
- docagent_cli/widgets/_links.py +63 -0
- docagent_cli/widgets/approval.py +442 -0
- docagent_cli/widgets/ask_user.py +398 -0
- docagent_cli/widgets/autocomplete.py +691 -0
- docagent_cli/widgets/chat_input.py +1827 -0
- docagent_cli/widgets/diff.py +248 -0
- docagent_cli/widgets/history.py +188 -0
- docagent_cli/widgets/loading.py +177 -0
- docagent_cli/widgets/mcp_viewer.py +362 -0
- docagent_cli/widgets/message_store.py +675 -0
- docagent_cli/widgets/messages.py +1751 -0
- docagent_cli/widgets/model_selector.py +964 -0
- docagent_cli/widgets/status.py +372 -0
- docagent_cli/widgets/theme_selector.py +164 -0
- docagent_cli/widgets/thread_selector.py +1905 -0
- docagent_cli/widgets/tool_renderers.py +148 -0
- docagent_cli/widgets/tool_widgets.py +274 -0
- docagent_cli/widgets/welcome.py +339 -0
- docagent_cli-0.0.35.data/data/docagent_cli/default_agent_prompt.md +12 -0
- docagent_cli-0.0.35.dist-info/METADATA +200 -0
- docagent_cli-0.0.35.dist-info/RECORD +300 -0
- docagent_cli-0.0.35.dist-info/WHEEL +4 -0
- docagent_cli-0.0.35.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,1352 @@
|
|
|
1
|
+
"""Textual UI adapter for agent execution."""
|
|
2
|
+
# This module has complex streaming logic ported from execution.py
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import contextlib
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
import time
|
|
11
|
+
import uuid
|
|
12
|
+
from typing import TYPE_CHECKING, Any
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from collections.abc import Awaitable, Callable
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
from langchain.agents.middleware.human_in_the_loop import (
|
|
19
|
+
ApproveDecision,
|
|
20
|
+
EditDecision,
|
|
21
|
+
HITLRequest,
|
|
22
|
+
RejectDecision,
|
|
23
|
+
)
|
|
24
|
+
from langchain_core.messages import AIMessage
|
|
25
|
+
from langchain_core.runnables import RunnableConfig
|
|
26
|
+
from langgraph.types import Command, Interrupt
|
|
27
|
+
from pydantic import TypeAdapter
|
|
28
|
+
from rich.console import Console
|
|
29
|
+
|
|
30
|
+
from docagent_cli._ask_user_types import AskUserWidgetResult, Question
|
|
31
|
+
|
|
32
|
+
# Type alias matching HITLResponse["decisions"] element type
|
|
33
|
+
HITLDecision = ApproveDecision | EditDecision | RejectDecision
|
|
34
|
+
|
|
35
|
+
from docagent_cli._ask_user_types import AskUserRequest
|
|
36
|
+
from docagent_cli._cli_context import CLIContext # noqa: TC001
|
|
37
|
+
from docagent_cli._debug import configure_debug_logging
|
|
38
|
+
from docagent_cli._session_stats import (
|
|
39
|
+
ModelStats as ModelStats,
|
|
40
|
+
SessionStats as SessionStats,
|
|
41
|
+
SpinnerStatus as SpinnerStatus,
|
|
42
|
+
format_token_count as format_token_count,
|
|
43
|
+
)
|
|
44
|
+
from docagent_cli.config import build_stream_config
|
|
45
|
+
from docagent_cli.file_ops import FileOpTracker
|
|
46
|
+
from docagent_cli.formatting import format_duration
|
|
47
|
+
from docagent_cli.hooks import dispatch_hook
|
|
48
|
+
from docagent_cli.input import MediaTracker, parse_file_mentions
|
|
49
|
+
from docagent_cli.media_utils import create_multimodal_content
|
|
50
|
+
from docagent_cli.tool_display import format_tool_message_content
|
|
51
|
+
from docagent_cli.widgets.messages import (
|
|
52
|
+
AppMessage,
|
|
53
|
+
AssistantMessage,
|
|
54
|
+
DiffMessage,
|
|
55
|
+
SummarizationMessage,
|
|
56
|
+
ToolCallMessage,
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
# Module-level logger; configure_debug_logging attaches the CLI's debug
# handlers so this module's logs follow the app-wide debug settings.
logger = logging.getLogger(__name__)
configure_debug_logging(logger)
|
|
61
|
+
|
|
62
|
+
_hitl_adapter_cache: dict[type, TypeAdapter] = {}
"""Lazy per-type cache of HITL request validators."""


def _get_hitl_request_adapter(hitl_request_type: type) -> TypeAdapter:
    """Return a cached `TypeAdapter` for `hitl_request_type`.

    Avoids re-compiling the pydantic schema on every `execute_task_textual`
    call. The cache is keyed by the requested type: the previous single-slot
    cache returned whichever adapter was built first, even when a later call
    passed a different `HITLRequest` class.

    Args:
        hitl_request_type: The `HITLRequest` class (passed in because
            it is imported locally by the caller).

    Returns:
        Shared `TypeAdapter` instance for the given type.
    """
    adapter = _hitl_adapter_cache.get(hitl_request_type)
    if adapter is None:
        # Local import keeps pydantic off the module-import critical path.
        from pydantic import TypeAdapter

        adapter = TypeAdapter(hitl_request_type)
        _hitl_adapter_cache[hitl_request_type] = adapter
    return adapter
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def print_usage_table(
    stats: SessionStats,
    wall_time: float,
    console: Console,
) -> None:
    """Print a model-usage stats table to a Rich console.

    When the session spans multiple models each gets its own row with a
    totals row appended; single-model sessions show one row.

    Args:
        stats: Cumulative session stats.
        wall_time: Total wall-clock time in seconds.
        console: Rich console for output.
    """
    from rich.table import Table

    has_time = wall_time >= 0.1  # noqa: PLR2004
    # Nothing happened this session: no requests, no tokens, negligible time.
    if not (stats.request_count or stats.input_tokens or has_time):
        return

    if stats.per_model:
        table = Table(
            show_header=True,
            header_style="bold",
            box=None,
            padding=(0, 2, 0, 0),
            show_edge=False,
        )
        table.add_column("Model", style="dim")
        table.add_column("Reqs", justify="right", style="dim")
        table.add_column("InputTok", justify="right", style="dim")
        table.add_column("OutputTok", justify="right", style="dim")

        def _add_row(label: str, row_stats: Any) -> None:
            # Works for both per-model ModelStats and the session totals,
            # which expose the same counter attributes.
            table.add_row(
                label,
                str(row_stats.request_count),
                format_token_count(row_stats.input_tokens),
                format_token_count(row_stats.output_tokens),
            )

        if len(stats.per_model) > 1:
            for model_name, model_stats in stats.per_model.items():
                _add_row(model_name, model_stats)
            _add_row("Total", stats)
        else:
            _add_row(next(iter(stats.per_model)), stats)

        console.print()
        console.print("[bold]Usage Stats[/bold]")
        console.print(table)

    if has_time:
        console.print()
        console.print(
            f"Agent active {format_duration(wall_time)}",
            style="dim",
            highlight=False,
        )
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
_ask_user_adapter_cache: TypeAdapter | None = None
"""Lazy singleton for the `ask_user` interrupt validator."""


def _get_ask_user_adapter() -> TypeAdapter:
    """Return a cached `TypeAdapter(AskUserRequest)`.

    Returns:
        Shared `TypeAdapter` instance.
    """
    global _ask_user_adapter_cache  # noqa: PLW0603
    if _ask_user_adapter_cache is not None:
        return _ask_user_adapter_cache

    # Built lazily on first use; the local import keeps pydantic off the
    # module-import critical path.
    from pydantic import TypeAdapter

    _ask_user_adapter_cache = TypeAdapter(AskUserRequest)
    return _ask_user_adapter_cache
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def _is_summarization_chunk(metadata: dict | None) -> bool:
|
|
176
|
+
"""Check if a message chunk is from summarization middleware.
|
|
177
|
+
|
|
178
|
+
The summarization model is invoked with
|
|
179
|
+
`config={"metadata": {"lc_source": "summarization"}}`
|
|
180
|
+
(see `langchain.agents.middleware.summarization`), which
|
|
181
|
+
LangChain's callback system merges into the stream metadata dict.
|
|
182
|
+
|
|
183
|
+
Args:
|
|
184
|
+
metadata: The metadata dict from the stream chunk.
|
|
185
|
+
|
|
186
|
+
Returns:
|
|
187
|
+
Whether the chunk is from summarization and should be filtered.
|
|
188
|
+
"""
|
|
189
|
+
if metadata is None:
|
|
190
|
+
return False
|
|
191
|
+
return metadata.get("lc_source") == "summarization"
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
class TextualUIAdapter:
    """Bridge between agent execution and the Textual chat UI.

    Wraps the handful of UI callbacks the streaming loop needs so that
    agent code renders streaming output as widgets without ever touching
    Textual directly.
    """

    def __init__(
        self,
        mount_message: Callable[..., Awaitable[None]],
        update_status: Callable[[str], None],
        request_approval: Callable[..., Awaitable[Any]],
        on_auto_approve_enabled: Callable[[], None] | None = None,
        set_spinner: Callable[[SpinnerStatus], Awaitable[None]] | None = None,
        set_active_message: Callable[[str | None], None] | None = None,
        sync_message_content: Callable[[str, str], None] | None = None,
        request_ask_user: (
            Callable[
                [list[Question]],
                Awaitable[asyncio.Future[AskUserWidgetResult] | None],
            ]
            | None
        ) = None,
    ) -> None:
        """Store the UI callbacks supplied by the app."""
        self._mount_message = mount_message
        """Awaitable hook that attaches a message widget to the chat."""

        self._update_status = update_status
        """Hook that rewrites the status-bar text."""

        self._request_approval = request_approval
        """Awaitable hook yielding a Future for HITL approval decisions."""

        self._on_auto_approve_enabled = on_auto_approve_enabled
        """Invoked when auto-approve is switched on from the HITL approval
        menu.

        Fires when the user picks "Auto-approve all" in an approval dialog,
        letting the app refresh its status bar and session state.
        """

        self._set_spinner = set_spinner
        """Hook that shows or hides the loading spinner."""

        self._set_active_message = set_active_message
        """Hook recording which message ID is streaming (`None` clears it)."""

        self._sync_message_content = sync_message_content
        """Hook that writes final streamed content back into the store."""

        self._request_ask_user = request_ask_user
        """Awaitable hook for `ask_user` interrupts.

        When awaited, yields a `Future` that resolves to the user's answers.
        """

        # Per-turn bookkeeping.
        self._current_tool_messages: dict[str, ToolCallMessage] = {}
        """Tool call ID -> mounted message widget, for in-flight calls."""

        # Token-display hooks (the app wires these after construction).
        self._on_tokens_update: Callable[[int], None] | None = None
        """Receives the total context token count after each LLM response."""

        self._on_tokens_hide: Callable[[], None] | None = None
        """Hides the token readout while a response streams."""

        self._on_tokens_show: Callable[[], None] | None = None
        """Restores the token readout from its cached value."""

    def finalize_pending_tools_with_error(self, error: str) -> None:
        """Flag every in-flight tool widget as failed and reset tracking.

        Safety net for when an unexpected exception aborts streaming
        before the matching `ToolMessage` results ever arrive.

        Args:
            error: Error text shown in each still-pending tool widget.
        """
        pending = list(self._current_tool_messages.values())
        for widget in pending:
            widget.set_error(error)
        self._current_tool_messages.clear()

        # Drop the active-message marker so the store keeps no stale
        # "currently streaming" state.
        if self._set_active_message:
            self._set_active_message(None)
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def _build_interrupted_ai_message(
|
|
284
|
+
pending_text_by_namespace: dict[tuple, str],
|
|
285
|
+
current_tool_messages: dict[str, Any],
|
|
286
|
+
) -> AIMessage | None:
|
|
287
|
+
"""Build an AIMessage capturing interrupted state (text + tool calls).
|
|
288
|
+
|
|
289
|
+
Args:
|
|
290
|
+
pending_text_by_namespace: Dict of accumulated text by namespace
|
|
291
|
+
current_tool_messages: Dict of tool_id -> ToolCallMessage widget
|
|
292
|
+
|
|
293
|
+
Returns:
|
|
294
|
+
AIMessage with accumulated content and tool calls, or None if empty.
|
|
295
|
+
"""
|
|
296
|
+
from langchain_core.messages import AIMessage
|
|
297
|
+
|
|
298
|
+
main_ns_key = ()
|
|
299
|
+
accumulated_text = pending_text_by_namespace.get(main_ns_key, "").strip()
|
|
300
|
+
|
|
301
|
+
# Reconstruct tool_calls from displayed tool messages
|
|
302
|
+
tool_calls = []
|
|
303
|
+
for tool_id, tool_widget in list(current_tool_messages.items()):
|
|
304
|
+
tool_calls.append(
|
|
305
|
+
{
|
|
306
|
+
"id": tool_id,
|
|
307
|
+
"name": tool_widget._tool_name,
|
|
308
|
+
"args": tool_widget._args,
|
|
309
|
+
}
|
|
310
|
+
)
|
|
311
|
+
|
|
312
|
+
if not accumulated_text and not tool_calls:
|
|
313
|
+
return None
|
|
314
|
+
|
|
315
|
+
return AIMessage(
|
|
316
|
+
content=accumulated_text,
|
|
317
|
+
tool_calls=tool_calls or [],
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def _read_mentioned_file(file_path: Path, max_embed_bytes: int) -> str:
|
|
322
|
+
"""Read a mentioned file for inline embedding (sync, for use with to_thread).
|
|
323
|
+
|
|
324
|
+
Args:
|
|
325
|
+
file_path: Resolved path to the file.
|
|
326
|
+
max_embed_bytes: Size threshold; larger files get a reference only.
|
|
327
|
+
|
|
328
|
+
Returns:
|
|
329
|
+
Markdown snippet with the file content or a size-exceeded reference.
|
|
330
|
+
"""
|
|
331
|
+
file_size = file_path.stat().st_size
|
|
332
|
+
if file_size > max_embed_bytes:
|
|
333
|
+
size_kb = file_size // 1024
|
|
334
|
+
return (
|
|
335
|
+
f"\n### {file_path.name}\n"
|
|
336
|
+
f"Path: `{file_path}`\n"
|
|
337
|
+
f"Size: {size_kb}KB (too large to embed, "
|
|
338
|
+
"use read_file tool to view)"
|
|
339
|
+
)
|
|
340
|
+
content = file_path.read_text(encoding="utf-8")
|
|
341
|
+
return f"\n### {file_path.name}\nPath: `{file_path}`\n```\n{content}\n```"
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
async def execute_task_textual(
|
|
345
|
+
user_input: str,
|
|
346
|
+
agent: Any, # noqa: ANN401 # Dynamic agent graph type
|
|
347
|
+
assistant_id: str | None,
|
|
348
|
+
session_state: Any, # noqa: ANN401 # Dynamic session state type
|
|
349
|
+
adapter: TextualUIAdapter,
|
|
350
|
+
backend: Any = None, # noqa: ANN401 # Dynamic backend type
|
|
351
|
+
image_tracker: MediaTracker | None = None,
|
|
352
|
+
context: CLIContext | None = None,
|
|
353
|
+
*,
|
|
354
|
+
sandbox_type: str | None = None,
|
|
355
|
+
message_kwargs: dict[str, Any] | None = None,
|
|
356
|
+
turn_stats: SessionStats | None = None,
|
|
357
|
+
) -> SessionStats:
|
|
358
|
+
"""Execute a task with output directed to Textual UI.
|
|
359
|
+
|
|
360
|
+
This is the Textual-compatible version of execute_task() that uses
|
|
361
|
+
the TextualUIAdapter for all UI operations.
|
|
362
|
+
|
|
363
|
+
Args:
|
|
364
|
+
user_input: The user's input message
|
|
365
|
+
agent: The LangGraph agent to execute
|
|
366
|
+
assistant_id: The agent identifier
|
|
367
|
+
session_state: Session state with auto_approve flag
|
|
368
|
+
adapter: The TextualUIAdapter for UI operations
|
|
369
|
+
backend: Optional backend for file operations
|
|
370
|
+
image_tracker: Optional tracker for images
|
|
371
|
+
context: Optional `CLIContext` with model override and params, passed
|
|
372
|
+
to the graph via `context=`.
|
|
373
|
+
sandbox_type: Sandbox provider name for trace metadata, or `None`
|
|
374
|
+
if no sandbox is active.
|
|
375
|
+
message_kwargs: Extra fields merged into the stream input message
|
|
376
|
+
dict (e.g., `additional_kwargs` for persisting skill metadata
|
|
377
|
+
in the checkpoint).
|
|
378
|
+
turn_stats: Pre-created `SessionStats` to accumulate into.
|
|
379
|
+
|
|
380
|
+
When the caller holds a reference to the same object, stats are
|
|
381
|
+
available even if this coroutine is cancelled before it can return.
|
|
382
|
+
|
|
383
|
+
If `None`, a new instance is created internally.
|
|
384
|
+
|
|
385
|
+
Returns:
|
|
386
|
+
Stats accumulated over this turn (request count, token counts,
|
|
387
|
+
wall-clock time).
|
|
388
|
+
|
|
389
|
+
Raises:
|
|
390
|
+
ValidationError: If HITL request validation fails (re-raised).
|
|
391
|
+
"""
|
|
392
|
+
from langchain.agents.middleware.human_in_the_loop import (
|
|
393
|
+
ApproveDecision,
|
|
394
|
+
HITLRequest,
|
|
395
|
+
RejectDecision,
|
|
396
|
+
)
|
|
397
|
+
from langchain_core.messages import HumanMessage, ToolMessage
|
|
398
|
+
from langgraph.types import Command
|
|
399
|
+
from pydantic import ValidationError
|
|
400
|
+
|
|
401
|
+
hitl_request_adapter = _get_hitl_request_adapter(HITLRequest)
|
|
402
|
+
ask_user_adapter = _get_ask_user_adapter()
|
|
403
|
+
|
|
404
|
+
# Parse file mentions and inject content if any — offload blocking I/O
|
|
405
|
+
prompt_text, mentioned_files = await asyncio.to_thread(
|
|
406
|
+
parse_file_mentions, user_input
|
|
407
|
+
)
|
|
408
|
+
|
|
409
|
+
# Max file size to embed inline (256KB, matching mistral-vibe)
|
|
410
|
+
# Larger files get a reference instead - use read_file tool to view them
|
|
411
|
+
max_embed_bytes = 256 * 1024
|
|
412
|
+
|
|
413
|
+
if mentioned_files:
|
|
414
|
+
context_parts = [prompt_text, "\n\n## Referenced Files\n"]
|
|
415
|
+
for file_path in mentioned_files:
|
|
416
|
+
try:
|
|
417
|
+
part = await asyncio.to_thread(
|
|
418
|
+
_read_mentioned_file, file_path, max_embed_bytes
|
|
419
|
+
)
|
|
420
|
+
context_parts.append(part)
|
|
421
|
+
except Exception as e: # noqa: BLE001 # Resilient adapter error handling
|
|
422
|
+
context_parts.append(
|
|
423
|
+
f"\n### {file_path.name}\n[Error reading file: {e}]"
|
|
424
|
+
)
|
|
425
|
+
final_input = "\n".join(context_parts)
|
|
426
|
+
else:
|
|
427
|
+
final_input = prompt_text
|
|
428
|
+
|
|
429
|
+
# Include images and videos in the message content
|
|
430
|
+
images_to_send = []
|
|
431
|
+
videos_to_send = []
|
|
432
|
+
if image_tracker:
|
|
433
|
+
images_to_send = image_tracker.get_images()
|
|
434
|
+
videos_to_send = image_tracker.get_videos()
|
|
435
|
+
if images_to_send or videos_to_send:
|
|
436
|
+
message_content = create_multimodal_content(
|
|
437
|
+
final_input, images_to_send, videos_to_send
|
|
438
|
+
)
|
|
439
|
+
else:
|
|
440
|
+
message_content = final_input
|
|
441
|
+
|
|
442
|
+
thread_id = session_state.thread_id
|
|
443
|
+
config = build_stream_config(thread_id, assistant_id, sandbox_type=sandbox_type)
|
|
444
|
+
|
|
445
|
+
await dispatch_hook("session.start", {"thread_id": thread_id})
|
|
446
|
+
|
|
447
|
+
captured_input_tokens = 0
|
|
448
|
+
captured_output_tokens = 0
|
|
449
|
+
if turn_stats is None:
|
|
450
|
+
turn_stats = SessionStats()
|
|
451
|
+
start_time = time.monotonic()
|
|
452
|
+
|
|
453
|
+
# Warn if token display callbacks are only partially wired — all three
|
|
454
|
+
# should be set together to avoid inconsistent status-bar behavior.
|
|
455
|
+
token_cbs = (
|
|
456
|
+
adapter._on_tokens_update,
|
|
457
|
+
adapter._on_tokens_hide,
|
|
458
|
+
adapter._on_tokens_show,
|
|
459
|
+
)
|
|
460
|
+
if any(token_cbs) and not all(token_cbs):
|
|
461
|
+
logger.warning(
|
|
462
|
+
"Token callbacks partially wired (update=%s, hide=%s, show=%s); "
|
|
463
|
+
"token display may behave inconsistently",
|
|
464
|
+
adapter._on_tokens_update is not None,
|
|
465
|
+
adapter._on_tokens_hide is not None,
|
|
466
|
+
adapter._on_tokens_show is not None,
|
|
467
|
+
)
|
|
468
|
+
|
|
469
|
+
# Show spinner
|
|
470
|
+
if adapter._set_spinner:
|
|
471
|
+
await adapter._set_spinner("Thinking")
|
|
472
|
+
|
|
473
|
+
# Hide token display during streaming (will be shown with accurate count at end)
|
|
474
|
+
if adapter._on_tokens_hide:
|
|
475
|
+
adapter._on_tokens_hide()
|
|
476
|
+
|
|
477
|
+
file_op_tracker = FileOpTracker(assistant_id=assistant_id, backend=backend)
|
|
478
|
+
displayed_tool_ids: set[str] = set()
|
|
479
|
+
tool_call_buffers: dict[str | int, dict] = {}
|
|
480
|
+
|
|
481
|
+
# Track pending text and assistant messages PER NAMESPACE to avoid interleaving
|
|
482
|
+
# when multiple subagents stream in parallel
|
|
483
|
+
pending_text_by_namespace: dict[tuple, str] = {}
|
|
484
|
+
assistant_message_by_namespace: dict[tuple, Any] = {}
|
|
485
|
+
|
|
486
|
+
# Clear media from tracker after creating the message
|
|
487
|
+
if image_tracker:
|
|
488
|
+
image_tracker.clear()
|
|
489
|
+
|
|
490
|
+
user_msg: dict[str, Any] = {"role": "user", "content": message_content}
|
|
491
|
+
if message_kwargs:
|
|
492
|
+
user_msg.update(message_kwargs)
|
|
493
|
+
stream_input: dict | Command = {"messages": [user_msg]}
|
|
494
|
+
|
|
495
|
+
# Track summarization lifecycle so spinner status and notification stay in sync.
|
|
496
|
+
summarization_in_progress = False
|
|
497
|
+
|
|
498
|
+
try:
|
|
499
|
+
while True:
|
|
500
|
+
interrupt_occurred = False
|
|
501
|
+
suppress_resumed_output = False
|
|
502
|
+
pending_interrupts: dict[str, HITLRequest] = {}
|
|
503
|
+
pending_ask_user: dict[str, AskUserRequest] = {}
|
|
504
|
+
|
|
505
|
+
async for chunk in agent.astream(
|
|
506
|
+
stream_input,
|
|
507
|
+
stream_mode=["messages", "updates"],
|
|
508
|
+
subgraphs=True,
|
|
509
|
+
config=config,
|
|
510
|
+
context=context,
|
|
511
|
+
durability="exit",
|
|
512
|
+
):
|
|
513
|
+
if not isinstance(chunk, tuple) or len(chunk) != 3: # noqa: PLR2004 # stream chunk is a 3-tuple (namespace, mode, data)
|
|
514
|
+
logger.debug("Skipping non-3-tuple chunk: %s", type(chunk).__name__)
|
|
515
|
+
continue
|
|
516
|
+
|
|
517
|
+
namespace, current_stream_mode, data = chunk
|
|
518
|
+
|
|
519
|
+
# Convert namespace to hashable tuple for dict keys
|
|
520
|
+
ns_key = tuple(namespace) if namespace else ()
|
|
521
|
+
|
|
522
|
+
# Filter out subagent outputs - only show main agent (empty
|
|
523
|
+
# namespace). Subagents run via Task tool and should only
|
|
524
|
+
# report back to the main agent
|
|
525
|
+
is_main_agent = ns_key == ()
|
|
526
|
+
|
|
527
|
+
# Handle UPDATES stream - for interrupts and todos
|
|
528
|
+
if current_stream_mode == "updates":
|
|
529
|
+
if not isinstance(data, dict):
|
|
530
|
+
continue
|
|
531
|
+
|
|
532
|
+
# Check for interrupts
|
|
533
|
+
if "__interrupt__" in data:
|
|
534
|
+
interrupts: list[Interrupt] = data["__interrupt__"]
|
|
535
|
+
if interrupts:
|
|
536
|
+
for interrupt_obj in interrupts:
|
|
537
|
+
iv = interrupt_obj.value
|
|
538
|
+
if (
|
|
539
|
+
isinstance(iv, dict)
|
|
540
|
+
and iv.get("type") == "ask_user"
|
|
541
|
+
):
|
|
542
|
+
try:
|
|
543
|
+
validated_ask_user = (
|
|
544
|
+
ask_user_adapter.validate_python(iv)
|
|
545
|
+
)
|
|
546
|
+
pending_ask_user[interrupt_obj.id] = (
|
|
547
|
+
validated_ask_user
|
|
548
|
+
)
|
|
549
|
+
interrupt_occurred = True
|
|
550
|
+
await dispatch_hook("input.required", {})
|
|
551
|
+
except ValidationError:
|
|
552
|
+
logger.exception(
|
|
553
|
+
"Invalid ask_user interrupt payload"
|
|
554
|
+
)
|
|
555
|
+
raise
|
|
556
|
+
else:
|
|
557
|
+
try:
|
|
558
|
+
validated_request = (
|
|
559
|
+
hitl_request_adapter.validate_python(iv)
|
|
560
|
+
)
|
|
561
|
+
pending_interrupts[interrupt_obj.id] = (
|
|
562
|
+
validated_request
|
|
563
|
+
)
|
|
564
|
+
interrupt_occurred = True
|
|
565
|
+
await dispatch_hook("input.required", {})
|
|
566
|
+
except ValidationError: # noqa: TRY203 # Re-raise preserves exception context in handler
|
|
567
|
+
raise
|
|
568
|
+
|
|
569
|
+
# Check for todo updates (not yet implemented in Textual UI)
|
|
570
|
+
chunk_data = next(iter(data.values())) if data else None
|
|
571
|
+
if (
|
|
572
|
+
chunk_data
|
|
573
|
+
and isinstance(chunk_data, dict)
|
|
574
|
+
and "todos" in chunk_data
|
|
575
|
+
):
|
|
576
|
+
pass # Future: render todo list widget
|
|
577
|
+
|
|
578
|
+
# Handle MESSAGES stream - for content and tool calls
|
|
579
|
+
elif current_stream_mode == "messages":
|
|
580
|
+
# Skip subagent outputs - only render main agent content in chat
|
|
581
|
+
if not is_main_agent:
|
|
582
|
+
logger.debug("Skipping subagent message ns=%s", ns_key)
|
|
583
|
+
continue
|
|
584
|
+
|
|
585
|
+
if not isinstance(data, tuple) or len(data) != 2: # noqa: PLR2004 # message stream data is a 2-tuple (message, metadata)
|
|
586
|
+
logger.debug(
|
|
587
|
+
"Skipping non-2-tuple message data: type=%s",
|
|
588
|
+
type(data).__name__,
|
|
589
|
+
)
|
|
590
|
+
continue
|
|
591
|
+
|
|
592
|
+
message, metadata = data
|
|
593
|
+
logger.debug(
|
|
594
|
+
"Processing message: type=%s id=%s has_content_blocks=%s",
|
|
595
|
+
type(message).__name__,
|
|
596
|
+
getattr(message, "id", None),
|
|
597
|
+
hasattr(message, "content_blocks"),
|
|
598
|
+
)
|
|
599
|
+
|
|
600
|
+
# Filter out summarization model output, but keep UI feedback.
|
|
601
|
+
# The summarization model streams AIMessage chunks tagged
|
|
602
|
+
# with lc_source="summarization" in the callback metadata.
|
|
603
|
+
# These are hidden from the user; only the spinner and a
|
|
604
|
+
# notification widget provide feedback.
|
|
605
|
+
if _is_summarization_chunk(metadata):
|
|
606
|
+
if not summarization_in_progress:
|
|
607
|
+
summarization_in_progress = True
|
|
608
|
+
if adapter._set_spinner:
|
|
609
|
+
await adapter._set_spinner("Offloading")
|
|
610
|
+
continue
|
|
611
|
+
|
|
612
|
+
# Regular (non-summarization) chunks resumed — summarization
|
|
613
|
+
# has finished. Mount the notification and reset the spinner.
|
|
614
|
+
if summarization_in_progress:
|
|
615
|
+
summarization_in_progress = False
|
|
616
|
+
try:
|
|
617
|
+
await adapter._mount_message(SummarizationMessage())
|
|
618
|
+
except Exception:
|
|
619
|
+
logger.debug(
|
|
620
|
+
"Failed to mount summarization notification",
|
|
621
|
+
exc_info=True,
|
|
622
|
+
)
|
|
623
|
+
if adapter._set_spinner and not adapter._current_tool_messages:
|
|
624
|
+
await adapter._set_spinner("Thinking")
|
|
625
|
+
|
|
626
|
+
if isinstance(message, HumanMessage):
|
|
627
|
+
content = message.text
|
|
628
|
+
# Flush pending text for this namespace
|
|
629
|
+
pending_text = pending_text_by_namespace.get(ns_key, "")
|
|
630
|
+
if content and pending_text:
|
|
631
|
+
await _flush_assistant_text_ns(
|
|
632
|
+
adapter,
|
|
633
|
+
pending_text,
|
|
634
|
+
ns_key,
|
|
635
|
+
assistant_message_by_namespace,
|
|
636
|
+
)
|
|
637
|
+
pending_text_by_namespace[ns_key] = ""
|
|
638
|
+
continue
|
|
639
|
+
|
|
640
|
+
if isinstance(message, ToolMessage):
|
|
641
|
+
tool_name = getattr(message, "name", "")
|
|
642
|
+
tool_status = getattr(message, "status", "success")
|
|
643
|
+
tool_content = format_tool_message_content(message.content)
|
|
644
|
+
record = file_op_tracker.complete_with_message(message)
|
|
645
|
+
|
|
646
|
+
# Update tool call status with output
|
|
647
|
+
tool_id = getattr(message, "tool_call_id", None)
|
|
648
|
+
if tool_id and tool_id in adapter._current_tool_messages:
|
|
649
|
+
# Pop before widget calls so the dict drains even
|
|
650
|
+
# if set_success/set_error raises.
|
|
651
|
+
tool_msg = adapter._current_tool_messages.pop(tool_id)
|
|
652
|
+
output_str = str(tool_content) if tool_content else ""
|
|
653
|
+
if tool_status == "success":
|
|
654
|
+
tool_msg.set_success(output_str)
|
|
655
|
+
else:
|
|
656
|
+
tool_msg.set_error(output_str or "Error")
|
|
657
|
+
await dispatch_hook(
|
|
658
|
+
"tool.error",
|
|
659
|
+
{"tool_names": [tool_msg._tool_name]},
|
|
660
|
+
)
|
|
661
|
+
elif tool_id:
|
|
662
|
+
logger.debug(
|
|
663
|
+
"ToolMessage tool_call_id=%s not in "
|
|
664
|
+
"_current_tool_messages; spinner gating "
|
|
665
|
+
"may be stale",
|
|
666
|
+
tool_id,
|
|
667
|
+
)
|
|
668
|
+
|
|
669
|
+
# Reshow spinner only when all in-flight tools have
|
|
670
|
+
# completed (avoids premature "Thinking..." when
|
|
671
|
+
# parallel tool calls are active).
|
|
672
|
+
if adapter._set_spinner and not adapter._current_tool_messages:
|
|
673
|
+
await adapter._set_spinner("Thinking")
|
|
674
|
+
|
|
675
|
+
# Show file operation results - always show diffs in chat
|
|
676
|
+
if record:
|
|
677
|
+
pending_text = pending_text_by_namespace.get(ns_key, "")
|
|
678
|
+
if pending_text:
|
|
679
|
+
await _flush_assistant_text_ns(
|
|
680
|
+
adapter,
|
|
681
|
+
pending_text,
|
|
682
|
+
ns_key,
|
|
683
|
+
assistant_message_by_namespace,
|
|
684
|
+
)
|
|
685
|
+
pending_text_by_namespace[ns_key] = ""
|
|
686
|
+
if record.diff:
|
|
687
|
+
await adapter._mount_message(
|
|
688
|
+
DiffMessage(record.diff, record.display_path)
|
|
689
|
+
)
|
|
690
|
+
continue
|
|
691
|
+
|
|
692
|
+
# Extract token usage (before content_blocks check
|
|
693
|
+
# - usage may be on any chunk)
|
|
694
|
+
if hasattr(message, "usage_metadata"):
|
|
695
|
+
usage = message.usage_metadata
|
|
696
|
+
if usage:
|
|
697
|
+
input_toks = usage.get("input_tokens", 0)
|
|
698
|
+
output_toks = usage.get("output_tokens", 0)
|
|
699
|
+
total_toks = usage.get("total_tokens", 0)
|
|
700
|
+
from docagent_cli.config import settings
|
|
701
|
+
|
|
702
|
+
active_model = settings.model_name or ""
|
|
703
|
+
if input_toks or output_toks:
|
|
704
|
+
# Model gives split counts — preferred path
|
|
705
|
+
turn_stats.record_request(
|
|
706
|
+
active_model, input_toks, output_toks
|
|
707
|
+
)
|
|
708
|
+
captured_input_tokens = max(
|
|
709
|
+
captured_input_tokens, input_toks + output_toks
|
|
710
|
+
)
|
|
711
|
+
elif total_toks:
|
|
712
|
+
# Fallback: model gives only total (no split)
|
|
713
|
+
turn_stats.record_request(active_model, total_toks, 0)
|
|
714
|
+
captured_input_tokens = max(
|
|
715
|
+
captured_input_tokens, total_toks
|
|
716
|
+
)
|
|
717
|
+
|
|
718
|
+
# Check if this is an AIMessageChunk with content
|
|
719
|
+
if not hasattr(message, "content_blocks"):
|
|
720
|
+
logger.debug(
|
|
721
|
+
"Message has no content_blocks: type=%s",
|
|
722
|
+
type(message).__name__,
|
|
723
|
+
)
|
|
724
|
+
continue
|
|
725
|
+
|
|
726
|
+
# Process content blocks
|
|
727
|
+
blocks = message.content_blocks
|
|
728
|
+
logger.debug(
|
|
729
|
+
"content_blocks count=%d blocks=%s",
|
|
730
|
+
len(blocks),
|
|
731
|
+
repr(blocks)[:500],
|
|
732
|
+
)
|
|
733
|
+
for block in blocks:
|
|
734
|
+
block_type = block.get("type")
|
|
735
|
+
|
|
736
|
+
if block_type == "text":
|
|
737
|
+
text = block.get("text", "")
|
|
738
|
+
if text:
|
|
739
|
+
# Track accumulated text for reference
|
|
740
|
+
pending_text = pending_text_by_namespace.get(ns_key, "")
|
|
741
|
+
pending_text += text
|
|
742
|
+
pending_text_by_namespace[ns_key] = pending_text
|
|
743
|
+
|
|
744
|
+
# Get or create assistant message for this namespace
|
|
745
|
+
current_msg = assistant_message_by_namespace.get(ns_key)
|
|
746
|
+
if current_msg is None:
|
|
747
|
+
# Hide spinner when assistant starts responding
|
|
748
|
+
if adapter._set_spinner:
|
|
749
|
+
await adapter._set_spinner(None)
|
|
750
|
+
msg_id = f"asst-{uuid.uuid4().hex[:8]}"
|
|
751
|
+
# Mark active BEFORE mounting so pruning
|
|
752
|
+
# (triggered by mount) won't remove it
|
|
753
|
+
# (_mount_message can trigger
|
|
754
|
+
# _prune_old_messages if the window exceeds
|
|
755
|
+
# WINDOW_SIZE.)
|
|
756
|
+
if adapter._set_active_message:
|
|
757
|
+
adapter._set_active_message(msg_id)
|
|
758
|
+
current_msg = AssistantMessage(id=msg_id)
|
|
759
|
+
await adapter._mount_message(current_msg)
|
|
760
|
+
assistant_message_by_namespace[ns_key] = current_msg
|
|
761
|
+
|
|
762
|
+
# Append just the new text chunk for smoother
|
|
763
|
+
# streaming (uses MarkdownStream internally for
|
|
764
|
+
# better performance)
|
|
765
|
+
await current_msg.append_content(text)
|
|
766
|
+
|
|
767
|
+
elif block_type in {"tool_call_chunk", "tool_call"}:
|
|
768
|
+
chunk_name = block.get("name")
|
|
769
|
+
chunk_args = block.get("args")
|
|
770
|
+
chunk_id = block.get("id")
|
|
771
|
+
chunk_index = block.get("index")
|
|
772
|
+
|
|
773
|
+
buffer_key: str | int
|
|
774
|
+
if chunk_index is not None:
|
|
775
|
+
buffer_key = chunk_index
|
|
776
|
+
elif chunk_id is not None:
|
|
777
|
+
buffer_key = chunk_id
|
|
778
|
+
else:
|
|
779
|
+
buffer_key = f"unknown-{len(tool_call_buffers)}"
|
|
780
|
+
|
|
781
|
+
buffer = tool_call_buffers.setdefault(
|
|
782
|
+
buffer_key,
|
|
783
|
+
{
|
|
784
|
+
"name": None,
|
|
785
|
+
"id": None,
|
|
786
|
+
"args": None,
|
|
787
|
+
"args_parts": [],
|
|
788
|
+
},
|
|
789
|
+
)
|
|
790
|
+
|
|
791
|
+
if chunk_name:
|
|
792
|
+
buffer["name"] = chunk_name
|
|
793
|
+
if chunk_id:
|
|
794
|
+
buffer["id"] = chunk_id
|
|
795
|
+
|
|
796
|
+
if isinstance(chunk_args, dict):
|
|
797
|
+
buffer["args"] = chunk_args
|
|
798
|
+
buffer["args_parts"] = []
|
|
799
|
+
elif isinstance(chunk_args, str):
|
|
800
|
+
if chunk_args:
|
|
801
|
+
parts: list[str] = buffer.setdefault(
|
|
802
|
+
"args_parts", []
|
|
803
|
+
)
|
|
804
|
+
if not parts or chunk_args != parts[-1]:
|
|
805
|
+
parts.append(chunk_args)
|
|
806
|
+
buffer["args"] = "".join(parts)
|
|
807
|
+
elif chunk_args is not None:
|
|
808
|
+
buffer["args"] = chunk_args
|
|
809
|
+
|
|
810
|
+
buffer_name = buffer.get("name")
|
|
811
|
+
buffer_id = buffer.get("id")
|
|
812
|
+
if buffer_name is None:
|
|
813
|
+
continue
|
|
814
|
+
|
|
815
|
+
parsed_args = buffer.get("args")
|
|
816
|
+
if isinstance(parsed_args, str):
|
|
817
|
+
if not parsed_args:
|
|
818
|
+
continue
|
|
819
|
+
try:
|
|
820
|
+
parsed_args = json.loads(parsed_args)
|
|
821
|
+
except json.JSONDecodeError:
|
|
822
|
+
continue
|
|
823
|
+
elif parsed_args is None:
|
|
824
|
+
continue
|
|
825
|
+
|
|
826
|
+
if not isinstance(parsed_args, dict):
|
|
827
|
+
parsed_args = {"value": parsed_args}
|
|
828
|
+
|
|
829
|
+
# Flush pending text before tool call
|
|
830
|
+
pending_text = pending_text_by_namespace.get(ns_key, "")
|
|
831
|
+
if pending_text:
|
|
832
|
+
await _flush_assistant_text_ns(
|
|
833
|
+
adapter,
|
|
834
|
+
pending_text,
|
|
835
|
+
ns_key,
|
|
836
|
+
assistant_message_by_namespace,
|
|
837
|
+
)
|
|
838
|
+
pending_text_by_namespace[ns_key] = ""
|
|
839
|
+
assistant_message_by_namespace.pop(ns_key, None)
|
|
840
|
+
|
|
841
|
+
logger.debug(
|
|
842
|
+
"Tool call buffer: name=%s id=%s args=%s",
|
|
843
|
+
buffer_name,
|
|
844
|
+
buffer_id,
|
|
845
|
+
repr(parsed_args)[:200],
|
|
846
|
+
)
|
|
847
|
+
if (
|
|
848
|
+
buffer_id is not None
|
|
849
|
+
and buffer_id not in displayed_tool_ids
|
|
850
|
+
):
|
|
851
|
+
displayed_tool_ids.add(buffer_id)
|
|
852
|
+
file_op_tracker.start_operation(
|
|
853
|
+
buffer_name, parsed_args, buffer_id
|
|
854
|
+
)
|
|
855
|
+
|
|
856
|
+
# Hide spinner before showing tool call
|
|
857
|
+
if adapter._set_spinner:
|
|
858
|
+
await adapter._set_spinner(None)
|
|
859
|
+
|
|
860
|
+
# Mount tool call message
|
|
861
|
+
logger.debug(
|
|
862
|
+
"Mounting ToolCallMessage: %s(%s)",
|
|
863
|
+
buffer_name,
|
|
864
|
+
repr(parsed_args)[:200],
|
|
865
|
+
)
|
|
866
|
+
tool_msg = ToolCallMessage(buffer_name, parsed_args)
|
|
867
|
+
await adapter._mount_message(tool_msg)
|
|
868
|
+
adapter._current_tool_messages[buffer_id] = tool_msg
|
|
869
|
+
|
|
870
|
+
tool_call_buffers.pop(buffer_key, None)
|
|
871
|
+
|
|
872
|
+
if getattr(message, "chunk_position", None) == "last":
|
|
873
|
+
pending_text = pending_text_by_namespace.get(ns_key, "")
|
|
874
|
+
if pending_text:
|
|
875
|
+
await _flush_assistant_text_ns(
|
|
876
|
+
adapter,
|
|
877
|
+
pending_text,
|
|
878
|
+
ns_key,
|
|
879
|
+
assistant_message_by_namespace,
|
|
880
|
+
)
|
|
881
|
+
pending_text_by_namespace[ns_key] = ""
|
|
882
|
+
assistant_message_by_namespace.pop(ns_key, None)
|
|
883
|
+
|
|
884
|
+
# Reset summarization state if stream ended mid-summarization
|
|
885
|
+
# (e.g. middleware error, stream exhausted before regular chunks).
|
|
886
|
+
if summarization_in_progress:
|
|
887
|
+
summarization_in_progress = False
|
|
888
|
+
try:
|
|
889
|
+
await adapter._mount_message(SummarizationMessage())
|
|
890
|
+
except Exception:
|
|
891
|
+
logger.debug(
|
|
892
|
+
"Failed to mount summarization notification",
|
|
893
|
+
exc_info=True,
|
|
894
|
+
)
|
|
895
|
+
if adapter._set_spinner and not adapter._current_tool_messages:
|
|
896
|
+
await adapter._set_spinner("Thinking")
|
|
897
|
+
|
|
898
|
+
# Flush any remaining text from all namespaces
|
|
899
|
+
for ns_key, pending_text in list(pending_text_by_namespace.items()):
|
|
900
|
+
if pending_text:
|
|
901
|
+
await _flush_assistant_text_ns(
|
|
902
|
+
adapter, pending_text, ns_key, assistant_message_by_namespace
|
|
903
|
+
)
|
|
904
|
+
pending_text_by_namespace.clear()
|
|
905
|
+
assistant_message_by_namespace.clear()
|
|
906
|
+
|
|
907
|
+
# Handle HITL after stream completes
|
|
908
|
+
if interrupt_occurred:
|
|
909
|
+
any_rejected = False
|
|
910
|
+
resume_payload: dict[str, Any] = {}
|
|
911
|
+
|
|
912
|
+
for interrupt_id, ask_req in list(pending_ask_user.items()):
|
|
913
|
+
questions = ask_req["questions"]
|
|
914
|
+
|
|
915
|
+
if adapter._request_ask_user:
|
|
916
|
+
if adapter._set_spinner:
|
|
917
|
+
await adapter._set_spinner(None)
|
|
918
|
+
result: dict[str, Any] = {
|
|
919
|
+
"type": "error",
|
|
920
|
+
"error": "ask_user callback returned no response",
|
|
921
|
+
}
|
|
922
|
+
try:
|
|
923
|
+
future = await adapter._request_ask_user(questions)
|
|
924
|
+
except Exception:
|
|
925
|
+
logger.exception("Failed to mount ask_user widget")
|
|
926
|
+
result = {
|
|
927
|
+
"type": "error",
|
|
928
|
+
"error": "failed to display ask_user prompt",
|
|
929
|
+
}
|
|
930
|
+
future = None
|
|
931
|
+
|
|
932
|
+
if future is None:
|
|
933
|
+
logger.error(
|
|
934
|
+
"ask_user callback returned no Future; "
|
|
935
|
+
"reporting as error"
|
|
936
|
+
)
|
|
937
|
+
else:
|
|
938
|
+
try:
|
|
939
|
+
future_result = await future
|
|
940
|
+
if isinstance(future_result, dict):
|
|
941
|
+
result = future_result
|
|
942
|
+
else:
|
|
943
|
+
logger.error(
|
|
944
|
+
"ask_user future returned non-dict result: %s",
|
|
945
|
+
type(future_result).__name__,
|
|
946
|
+
)
|
|
947
|
+
result = {
|
|
948
|
+
"type": "error",
|
|
949
|
+
"error": "invalid ask_user widget result",
|
|
950
|
+
}
|
|
951
|
+
except Exception:
|
|
952
|
+
logger.exception(
|
|
953
|
+
"ask_user future resolution failed; "
|
|
954
|
+
"reporting as error"
|
|
955
|
+
)
|
|
956
|
+
result = {
|
|
957
|
+
"type": "error",
|
|
958
|
+
"error": "failed to receive ask_user response",
|
|
959
|
+
}
|
|
960
|
+
|
|
961
|
+
result_type = result.get("type")
|
|
962
|
+
if result_type == "answered":
|
|
963
|
+
answers = result.get("answers", [])
|
|
964
|
+
if isinstance(answers, list):
|
|
965
|
+
resume_payload[interrupt_id] = {"answers": answers}
|
|
966
|
+
tool_id = ask_req["tool_call_id"]
|
|
967
|
+
if tool_id in adapter._current_tool_messages:
|
|
968
|
+
tool_msg = adapter._current_tool_messages[tool_id]
|
|
969
|
+
tool_msg.set_success("User answered")
|
|
970
|
+
adapter._current_tool_messages.pop(tool_id, None)
|
|
971
|
+
else:
|
|
972
|
+
logger.error(
|
|
973
|
+
"ask_user answered payload had non-list "
|
|
974
|
+
"answers: %s",
|
|
975
|
+
type(answers).__name__,
|
|
976
|
+
)
|
|
977
|
+
resume_payload[interrupt_id] = {
|
|
978
|
+
"status": "error",
|
|
979
|
+
"error": "invalid ask_user answers payload",
|
|
980
|
+
"answers": ["" for _ in questions],
|
|
981
|
+
}
|
|
982
|
+
any_rejected = True
|
|
983
|
+
elif result_type == "cancelled":
|
|
984
|
+
resume_payload[interrupt_id] = {
|
|
985
|
+
"status": "cancelled",
|
|
986
|
+
"answers": ["" for _ in questions],
|
|
987
|
+
}
|
|
988
|
+
any_rejected = True
|
|
989
|
+
else:
|
|
990
|
+
error_text = result.get("error")
|
|
991
|
+
if not isinstance(error_text, str) or not error_text:
|
|
992
|
+
error_text = "ask_user interaction failed"
|
|
993
|
+
resume_payload[interrupt_id] = {
|
|
994
|
+
"status": "error",
|
|
995
|
+
"error": error_text,
|
|
996
|
+
"answers": ["" for _ in questions],
|
|
997
|
+
}
|
|
998
|
+
any_rejected = True
|
|
999
|
+
else:
|
|
1000
|
+
logger.warning(
|
|
1001
|
+
"ask_user interrupt received but no UI callback is "
|
|
1002
|
+
"registered; reporting as error"
|
|
1003
|
+
)
|
|
1004
|
+
resume_payload[interrupt_id] = {
|
|
1005
|
+
"status": "error",
|
|
1006
|
+
"error": "ask_user not supported by this UI",
|
|
1007
|
+
"answers": ["" for _ in questions],
|
|
1008
|
+
}
|
|
1009
|
+
|
|
1010
|
+
for interrupt_id, hitl_request in list(pending_interrupts.items()):
|
|
1011
|
+
action_requests = hitl_request["action_requests"]
|
|
1012
|
+
|
|
1013
|
+
if session_state.auto_approve:
|
|
1014
|
+
decisions: list[HITLDecision] = [
|
|
1015
|
+
ApproveDecision(type="approve") for _ in action_requests
|
|
1016
|
+
]
|
|
1017
|
+
resume_payload[interrupt_id] = {"decisions": decisions}
|
|
1018
|
+
for tool_msg in list(adapter._current_tool_messages.values()):
|
|
1019
|
+
tool_msg.set_running()
|
|
1020
|
+
else:
|
|
1021
|
+
# Batch approval - one dialog for all parallel tool calls
|
|
1022
|
+
await dispatch_hook(
|
|
1023
|
+
"permission.request",
|
|
1024
|
+
{
|
|
1025
|
+
"tool_names": [
|
|
1026
|
+
r.get("name", "") for r in action_requests
|
|
1027
|
+
]
|
|
1028
|
+
},
|
|
1029
|
+
)
|
|
1030
|
+
future = await adapter._request_approval(
|
|
1031
|
+
action_requests, assistant_id
|
|
1032
|
+
)
|
|
1033
|
+
decision = await future
|
|
1034
|
+
|
|
1035
|
+
if isinstance(decision, dict):
|
|
1036
|
+
decision_type = decision.get("type")
|
|
1037
|
+
|
|
1038
|
+
if decision_type == "auto_approve_all":
|
|
1039
|
+
session_state.auto_approve = True
|
|
1040
|
+
if adapter._on_auto_approve_enabled:
|
|
1041
|
+
adapter._on_auto_approve_enabled()
|
|
1042
|
+
decisions = [
|
|
1043
|
+
ApproveDecision(type="approve")
|
|
1044
|
+
for _ in action_requests
|
|
1045
|
+
]
|
|
1046
|
+
tool_msgs = list(
|
|
1047
|
+
adapter._current_tool_messages.values()
|
|
1048
|
+
)
|
|
1049
|
+
for tool_msg in tool_msgs:
|
|
1050
|
+
tool_msg.set_running()
|
|
1051
|
+
for action_request in action_requests:
|
|
1052
|
+
tool_name = action_request.get("name")
|
|
1053
|
+
if tool_name in {
|
|
1054
|
+
"write_file",
|
|
1055
|
+
"edit_file",
|
|
1056
|
+
}:
|
|
1057
|
+
args = action_request.get("args", {})
|
|
1058
|
+
if isinstance(args, dict):
|
|
1059
|
+
file_op_tracker.mark_hitl_approved(
|
|
1060
|
+
tool_name, args
|
|
1061
|
+
)
|
|
1062
|
+
|
|
1063
|
+
elif decision_type == "approve":
|
|
1064
|
+
decisions = [
|
|
1065
|
+
ApproveDecision(type="approve")
|
|
1066
|
+
for _ in action_requests
|
|
1067
|
+
]
|
|
1068
|
+
tool_msgs = list(
|
|
1069
|
+
adapter._current_tool_messages.values()
|
|
1070
|
+
)
|
|
1071
|
+
for tool_msg in tool_msgs:
|
|
1072
|
+
tool_msg.set_running()
|
|
1073
|
+
for action_request in action_requests:
|
|
1074
|
+
tool_name = action_request.get("name")
|
|
1075
|
+
if tool_name in {
|
|
1076
|
+
"write_file",
|
|
1077
|
+
"edit_file",
|
|
1078
|
+
}:
|
|
1079
|
+
args = action_request.get("args", {})
|
|
1080
|
+
if isinstance(args, dict):
|
|
1081
|
+
file_op_tracker.mark_hitl_approved(
|
|
1082
|
+
tool_name, args
|
|
1083
|
+
)
|
|
1084
|
+
|
|
1085
|
+
elif decision_type == "reject":
|
|
1086
|
+
decisions = [
|
|
1087
|
+
RejectDecision(type="reject")
|
|
1088
|
+
for _ in action_requests
|
|
1089
|
+
]
|
|
1090
|
+
tool_msgs = list(
|
|
1091
|
+
adapter._current_tool_messages.values()
|
|
1092
|
+
)
|
|
1093
|
+
for tool_msg in tool_msgs:
|
|
1094
|
+
tool_msg.set_rejected()
|
|
1095
|
+
adapter._current_tool_messages.clear()
|
|
1096
|
+
any_rejected = True
|
|
1097
|
+
else:
|
|
1098
|
+
logger.warning(
|
|
1099
|
+
"Unexpected HITL decision type: %s",
|
|
1100
|
+
decision_type,
|
|
1101
|
+
)
|
|
1102
|
+
decisions = [
|
|
1103
|
+
RejectDecision(type="reject")
|
|
1104
|
+
for _ in action_requests
|
|
1105
|
+
]
|
|
1106
|
+
for tool_msg in list(
|
|
1107
|
+
adapter._current_tool_messages.values()
|
|
1108
|
+
):
|
|
1109
|
+
tool_msg.set_rejected()
|
|
1110
|
+
adapter._current_tool_messages.clear()
|
|
1111
|
+
any_rejected = True
|
|
1112
|
+
else:
|
|
1113
|
+
logger.warning(
|
|
1114
|
+
"HITL decision was not a dict: %s",
|
|
1115
|
+
type(decision).__name__,
|
|
1116
|
+
)
|
|
1117
|
+
decisions = [
|
|
1118
|
+
RejectDecision(type="reject") for _ in action_requests
|
|
1119
|
+
]
|
|
1120
|
+
for tool_msg in list(
|
|
1121
|
+
adapter._current_tool_messages.values()
|
|
1122
|
+
):
|
|
1123
|
+
tool_msg.set_rejected()
|
|
1124
|
+
adapter._current_tool_messages.clear()
|
|
1125
|
+
any_rejected = True
|
|
1126
|
+
|
|
1127
|
+
resume_payload[interrupt_id] = {"decisions": decisions}
|
|
1128
|
+
|
|
1129
|
+
if any_rejected:
|
|
1130
|
+
break
|
|
1131
|
+
|
|
1132
|
+
suppress_resumed_output = any_rejected
|
|
1133
|
+
|
|
1134
|
+
if interrupt_occurred and resume_payload:
|
|
1135
|
+
if suppress_resumed_output and not pending_ask_user:
|
|
1136
|
+
await adapter._mount_message(
|
|
1137
|
+
AppMessage(
|
|
1138
|
+
"Command rejected. Tell the agent what you'd like instead."
|
|
1139
|
+
)
|
|
1140
|
+
)
|
|
1141
|
+
turn_stats.wall_time_seconds = time.monotonic() - start_time
|
|
1142
|
+
return turn_stats
|
|
1143
|
+
|
|
1144
|
+
stream_input = Command(resume=resume_payload)
|
|
1145
|
+
else:
|
|
1146
|
+
await dispatch_hook("task.complete", {"thread_id": thread_id})
|
|
1147
|
+
break
|
|
1148
|
+
|
|
1149
|
+
except asyncio.CancelledError:
|
|
1150
|
+
# Clear active message immediately so it won't block pruning
|
|
1151
|
+
# If we don't do this, the store still thinks it's actice and protects
|
|
1152
|
+
# from pruning, which breaks get_messages_to_prune(), potentially
|
|
1153
|
+
# blocking all future pruning
|
|
1154
|
+
if adapter._set_active_message:
|
|
1155
|
+
adapter._set_active_message(None)
|
|
1156
|
+
|
|
1157
|
+
# Hide spinner (may still show "Offloading" if interrupted mid-offload)
|
|
1158
|
+
if adapter._set_spinner:
|
|
1159
|
+
await adapter._set_spinner(None)
|
|
1160
|
+
|
|
1161
|
+
await adapter._mount_message(AppMessage("Interrupted by user"))
|
|
1162
|
+
|
|
1163
|
+
# Save accumulated state before marking tools as rejected (best-effort)
|
|
1164
|
+
# State update failures shouldn't prevent cleanup
|
|
1165
|
+
try:
|
|
1166
|
+
interrupted_msg = _build_interrupted_ai_message(
|
|
1167
|
+
pending_text_by_namespace,
|
|
1168
|
+
adapter._current_tool_messages,
|
|
1169
|
+
)
|
|
1170
|
+
if interrupted_msg:
|
|
1171
|
+
await agent.aupdate_state(config, {"messages": [interrupted_msg]})
|
|
1172
|
+
|
|
1173
|
+
cancellation_msg = HumanMessage(
|
|
1174
|
+
content="[SYSTEM] Task interrupted by user. "
|
|
1175
|
+
"Previous operation was cancelled."
|
|
1176
|
+
)
|
|
1177
|
+
await agent.aupdate_state(config, {"messages": [cancellation_msg]})
|
|
1178
|
+
except Exception:
|
|
1179
|
+
logger.debug("Failed to save interrupted state", exc_info=True)
|
|
1180
|
+
|
|
1181
|
+
# Mark tools as rejected AFTER saving state
|
|
1182
|
+
for tool_msg in list(adapter._current_tool_messages.values()):
|
|
1183
|
+
tool_msg.set_rejected()
|
|
1184
|
+
adapter._current_tool_messages.clear()
|
|
1185
|
+
|
|
1186
|
+
# Report tokens even on interrupt (or restore display if none captured)
|
|
1187
|
+
turn_stats.wall_time_seconds = time.monotonic() - start_time
|
|
1188
|
+
await _report_and_persist_tokens(
|
|
1189
|
+
adapter,
|
|
1190
|
+
agent,
|
|
1191
|
+
config,
|
|
1192
|
+
captured_input_tokens,
|
|
1193
|
+
captured_output_tokens,
|
|
1194
|
+
shield=True,
|
|
1195
|
+
)
|
|
1196
|
+
return turn_stats
|
|
1197
|
+
|
|
1198
|
+
except KeyboardInterrupt:
|
|
1199
|
+
# Clear active message immediately so it won't block pruning
|
|
1200
|
+
# If we don't do this, the store still thinks it's actice and protects
|
|
1201
|
+
# from pruning, which breaks get_messages_to_prune(), potentially
|
|
1202
|
+
# blocking all future pruning
|
|
1203
|
+
if adapter._set_active_message:
|
|
1204
|
+
adapter._set_active_message(None)
|
|
1205
|
+
|
|
1206
|
+
# Hide spinner (may still show "Offloading" if interrupted mid-offload)
|
|
1207
|
+
if adapter._set_spinner:
|
|
1208
|
+
await adapter._set_spinner(None)
|
|
1209
|
+
|
|
1210
|
+
await adapter._mount_message(AppMessage("Interrupted by user"))
|
|
1211
|
+
|
|
1212
|
+
# Save accumulated state before marking tools as rejected (best-effort)
|
|
1213
|
+
# State update failures shouldn't prevent cleanup
|
|
1214
|
+
try:
|
|
1215
|
+
interrupted_msg = _build_interrupted_ai_message(
|
|
1216
|
+
pending_text_by_namespace,
|
|
1217
|
+
adapter._current_tool_messages,
|
|
1218
|
+
)
|
|
1219
|
+
if interrupted_msg:
|
|
1220
|
+
await agent.aupdate_state(config, {"messages": [interrupted_msg]})
|
|
1221
|
+
|
|
1222
|
+
cancellation_msg = HumanMessage(
|
|
1223
|
+
content="[SYSTEM] Task interrupted by user. "
|
|
1224
|
+
"Previous operation was cancelled."
|
|
1225
|
+
)
|
|
1226
|
+
await agent.aupdate_state(config, {"messages": [cancellation_msg]})
|
|
1227
|
+
except Exception:
|
|
1228
|
+
logger.debug("Failed to save interrupted state", exc_info=True)
|
|
1229
|
+
|
|
1230
|
+
# Mark tools as rejected AFTER saving state
|
|
1231
|
+
for tool_msg in list(adapter._current_tool_messages.values()):
|
|
1232
|
+
tool_msg.set_rejected()
|
|
1233
|
+
adapter._current_tool_messages.clear()
|
|
1234
|
+
|
|
1235
|
+
# Report tokens even on interrupt (or restore display if none captured)
|
|
1236
|
+
turn_stats.wall_time_seconds = time.monotonic() - start_time
|
|
1237
|
+
await _report_and_persist_tokens(
|
|
1238
|
+
adapter,
|
|
1239
|
+
agent,
|
|
1240
|
+
config,
|
|
1241
|
+
captured_input_tokens,
|
|
1242
|
+
captured_output_tokens,
|
|
1243
|
+
shield=True,
|
|
1244
|
+
)
|
|
1245
|
+
return turn_stats
|
|
1246
|
+
|
|
1247
|
+
# Update token count and return stats
|
|
1248
|
+
turn_stats.wall_time_seconds = time.monotonic() - start_time
|
|
1249
|
+
await _report_and_persist_tokens(
|
|
1250
|
+
adapter,
|
|
1251
|
+
agent,
|
|
1252
|
+
config,
|
|
1253
|
+
captured_input_tokens,
|
|
1254
|
+
captured_output_tokens,
|
|
1255
|
+
)
|
|
1256
|
+
return turn_stats
|
|
1257
|
+
|
|
1258
|
+
|
|
1259
|
+
async def _persist_context_tokens(
    agent: Any,  # noqa: ANN401 # Dynamic agent graph type
    config: RunnableConfig,
    tokens: int,
) -> None:
    """Best-effort persist of the context token count into graph state.

    Args:
        agent: The LangGraph agent (must support `aupdate_state`).
        config: Runnable config with `thread_id`.
        tokens: Total context tokens to persist.
    """
    state_update = {"_context_tokens": tokens}
    try:
        await agent.aupdate_state(config, state_update)
    except Exception:
        # Non-critical: a stale count on resume is acceptable, so log and move on.
        logger.warning(
            "Failed to persist _context_tokens=%d; token count may be stale on resume",
            tokens,
            exc_info=True,
        )
|
|
1279
|
+
|
|
1280
|
+
|
|
1281
|
+
async def _report_and_persist_tokens(
    adapter: TextualUIAdapter,
    agent: Any,  # noqa: ANN401 # Dynamic agent graph type
    config: RunnableConfig,
    captured_input_tokens: int,
    captured_output_tokens: int,
    *,
    shield: bool = False,
) -> None:
    """Update the token display and best-effort persist to graph state.

    Args:
        adapter: UI adapter with token callbacks.
        agent: The LangGraph agent.
        config: Runnable config with `thread_id` in its configurable dict.
        captured_input_tokens: Total input tokens captured during the turn.
        captured_output_tokens: Total output tokens captured during the turn.
        shield: When `True`, suppress all exceptions (including `BaseException`)
            from the persist call so that cancellation handlers can safely await
            this without re-raising.
    """
    if not (captured_input_tokens or captured_output_tokens):
        # Nothing captured this turn: restore the prior token display if the
        # adapter supports it, and skip persistence entirely.
        if adapter._on_tokens_show:
            adapter._on_tokens_show()
        return

    # Push the fresh input-token count to the UI first.
    if adapter._on_tokens_update:
        adapter._on_tokens_update(captured_input_tokens)

    if not shield:
        await _persist_context_tokens(agent, config, captured_input_tokens)
        return

    # Shielded path: cancellation handlers await this, so swallow everything
    # (even BaseException) rather than re-raise during cleanup.
    with contextlib.suppress(BaseException):
        await _persist_context_tokens(agent, config, captured_input_tokens)
|
|
1312
|
+
|
|
1313
|
+
|
|
1314
|
+
async def _flush_assistant_text_ns(
|
|
1315
|
+
adapter: TextualUIAdapter,
|
|
1316
|
+
text: str,
|
|
1317
|
+
ns_key: tuple,
|
|
1318
|
+
assistant_message_by_namespace: dict[tuple, Any],
|
|
1319
|
+
) -> None:
|
|
1320
|
+
"""Flush accumulated assistant text for a specific namespace.
|
|
1321
|
+
|
|
1322
|
+
Finalizes the streaming by stopping the MarkdownStream.
|
|
1323
|
+
If no message exists yet, creates one with the full content.
|
|
1324
|
+
"""
|
|
1325
|
+
if not text.strip():
|
|
1326
|
+
return
|
|
1327
|
+
|
|
1328
|
+
current_msg = assistant_message_by_namespace.get(ns_key)
|
|
1329
|
+
if current_msg is None:
|
|
1330
|
+
# No message was created during streaming - create one with full content
|
|
1331
|
+
msg_id = f"asst-{uuid.uuid4().hex[:8]}"
|
|
1332
|
+
current_msg = AssistantMessage(text, id=msg_id)
|
|
1333
|
+
await adapter._mount_message(current_msg)
|
|
1334
|
+
await current_msg.write_initial_content()
|
|
1335
|
+
assistant_message_by_namespace[ns_key] = current_msg
|
|
1336
|
+
else:
|
|
1337
|
+
# Stop the stream to finalize the content
|
|
1338
|
+
await current_msg.stop_stream()
|
|
1339
|
+
|
|
1340
|
+
# When the AssistantMessage was first mounted and recorded in the
|
|
1341
|
+
# MessageStore, it had empty content (streaming hadn't started yet).
|
|
1342
|
+
# Now that streaming is done, the widget holds the full text in
|
|
1343
|
+
# `_content`, but the store's MessageData still has `content=""`.
|
|
1344
|
+
# If the message is later pruned and re-hydrated, `to_widget()` would
|
|
1345
|
+
# recreate it from that stale empty string. This call copies the
|
|
1346
|
+
# widget's final content back into the store so re-hydration works.
|
|
1347
|
+
if adapter._sync_message_content and current_msg.id:
|
|
1348
|
+
adapter._sync_message_content(current_msg.id, current_msg._content)
|
|
1349
|
+
|
|
1350
|
+
# Clear active message since streaming is done
|
|
1351
|
+
if adapter._set_active_message:
|
|
1352
|
+
adapter._set_active_message(None)
|