ai-parrot 0.17.2__cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentui/.prettierrc +15 -0
- agentui/QUICKSTART.md +272 -0
- agentui/README.md +59 -0
- agentui/env.example +16 -0
- agentui/jsconfig.json +14 -0
- agentui/package-lock.json +4242 -0
- agentui/package.json +34 -0
- agentui/scripts/postinstall/apply-patches.mjs +260 -0
- agentui/src/app.css +61 -0
- agentui/src/app.d.ts +13 -0
- agentui/src/app.html +12 -0
- agentui/src/components/LoadingSpinner.svelte +64 -0
- agentui/src/components/ThemeSwitcher.svelte +159 -0
- agentui/src/components/index.js +4 -0
- agentui/src/lib/api/bots.ts +60 -0
- agentui/src/lib/api/chat.ts +22 -0
- agentui/src/lib/api/http.ts +25 -0
- agentui/src/lib/components/BotCard.svelte +33 -0
- agentui/src/lib/components/ChatBubble.svelte +63 -0
- agentui/src/lib/components/Toast.svelte +21 -0
- agentui/src/lib/config.ts +20 -0
- agentui/src/lib/stores/auth.svelte.ts +73 -0
- agentui/src/lib/stores/theme.svelte.js +64 -0
- agentui/src/lib/stores/toast.svelte.ts +31 -0
- agentui/src/lib/utils/conversation.ts +39 -0
- agentui/src/routes/+layout.svelte +20 -0
- agentui/src/routes/+page.svelte +232 -0
- agentui/src/routes/login/+page.svelte +200 -0
- agentui/src/routes/talk/[agentId]/+page.svelte +297 -0
- agentui/src/routes/talk/[agentId]/+page.ts +7 -0
- agentui/static/README.md +1 -0
- agentui/svelte.config.js +11 -0
- agentui/tailwind.config.ts +53 -0
- agentui/tsconfig.json +3 -0
- agentui/vite.config.ts +10 -0
- ai_parrot-0.17.2.dist-info/METADATA +472 -0
- ai_parrot-0.17.2.dist-info/RECORD +535 -0
- ai_parrot-0.17.2.dist-info/WHEEL +6 -0
- ai_parrot-0.17.2.dist-info/entry_points.txt +2 -0
- ai_parrot-0.17.2.dist-info/licenses/LICENSE +21 -0
- ai_parrot-0.17.2.dist-info/top_level.txt +6 -0
- crew-builder/.prettierrc +15 -0
- crew-builder/QUICKSTART.md +259 -0
- crew-builder/README.md +113 -0
- crew-builder/env.example +17 -0
- crew-builder/jsconfig.json +14 -0
- crew-builder/package-lock.json +4182 -0
- crew-builder/package.json +37 -0
- crew-builder/scripts/postinstall/apply-patches.mjs +260 -0
- crew-builder/src/app.css +62 -0
- crew-builder/src/app.d.ts +13 -0
- crew-builder/src/app.html +12 -0
- crew-builder/src/components/LoadingSpinner.svelte +64 -0
- crew-builder/src/components/ThemeSwitcher.svelte +149 -0
- crew-builder/src/components/index.js +9 -0
- crew-builder/src/lib/api/bots.ts +60 -0
- crew-builder/src/lib/api/chat.ts +80 -0
- crew-builder/src/lib/api/client.ts +56 -0
- crew-builder/src/lib/api/crew/crew.ts +136 -0
- crew-builder/src/lib/api/index.ts +5 -0
- crew-builder/src/lib/api/o365/auth.ts +65 -0
- crew-builder/src/lib/auth/auth.ts +54 -0
- crew-builder/src/lib/components/AgentNode.svelte +43 -0
- crew-builder/src/lib/components/BotCard.svelte +33 -0
- crew-builder/src/lib/components/ChatBubble.svelte +67 -0
- crew-builder/src/lib/components/ConfigPanel.svelte +278 -0
- crew-builder/src/lib/components/JsonTreeNode.svelte +76 -0
- crew-builder/src/lib/components/JsonViewer.svelte +24 -0
- crew-builder/src/lib/components/MarkdownEditor.svelte +48 -0
- crew-builder/src/lib/components/ThemeToggle.svelte +36 -0
- crew-builder/src/lib/components/Toast.svelte +67 -0
- crew-builder/src/lib/components/Toolbar.svelte +157 -0
- crew-builder/src/lib/components/index.ts +10 -0
- crew-builder/src/lib/config.ts +8 -0
- crew-builder/src/lib/stores/auth.svelte.ts +228 -0
- crew-builder/src/lib/stores/crewStore.ts +369 -0
- crew-builder/src/lib/stores/theme.svelte.js +145 -0
- crew-builder/src/lib/stores/toast.svelte.ts +69 -0
- crew-builder/src/lib/utils/conversation.ts +39 -0
- crew-builder/src/lib/utils/markdown.ts +122 -0
- crew-builder/src/lib/utils/talkHistory.ts +47 -0
- crew-builder/src/routes/+layout.svelte +20 -0
- crew-builder/src/routes/+page.svelte +539 -0
- crew-builder/src/routes/agents/+page.svelte +247 -0
- crew-builder/src/routes/agents/[agentId]/+page.svelte +288 -0
- crew-builder/src/routes/agents/[agentId]/+page.ts +7 -0
- crew-builder/src/routes/builder/+page.svelte +204 -0
- crew-builder/src/routes/crew/ask/+page.svelte +1052 -0
- crew-builder/src/routes/crew/ask/+page.ts +1 -0
- crew-builder/src/routes/integrations/o365/+page.svelte +304 -0
- crew-builder/src/routes/login/+page.svelte +197 -0
- crew-builder/src/routes/talk/[agentId]/+page.svelte +487 -0
- crew-builder/src/routes/talk/[agentId]/+page.ts +7 -0
- crew-builder/static/README.md +1 -0
- crew-builder/svelte.config.js +11 -0
- crew-builder/tailwind.config.ts +53 -0
- crew-builder/tsconfig.json +3 -0
- crew-builder/vite.config.ts +10 -0
- mcp_servers/calculator_server.py +309 -0
- parrot/__init__.py +27 -0
- parrot/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/__pycache__/version.cpython-310.pyc +0 -0
- parrot/_version.py +34 -0
- parrot/a2a/__init__.py +48 -0
- parrot/a2a/client.py +658 -0
- parrot/a2a/discovery.py +89 -0
- parrot/a2a/mixin.py +257 -0
- parrot/a2a/models.py +376 -0
- parrot/a2a/server.py +770 -0
- parrot/agents/__init__.py +29 -0
- parrot/bots/__init__.py +12 -0
- parrot/bots/a2a_agent.py +19 -0
- parrot/bots/abstract.py +3139 -0
- parrot/bots/agent.py +1129 -0
- parrot/bots/basic.py +9 -0
- parrot/bots/chatbot.py +669 -0
- parrot/bots/data.py +1618 -0
- parrot/bots/database/__init__.py +5 -0
- parrot/bots/database/abstract.py +3071 -0
- parrot/bots/database/cache.py +286 -0
- parrot/bots/database/models.py +468 -0
- parrot/bots/database/prompts.py +154 -0
- parrot/bots/database/retries.py +98 -0
- parrot/bots/database/router.py +269 -0
- parrot/bots/database/sql.py +41 -0
- parrot/bots/db/__init__.py +6 -0
- parrot/bots/db/abstract.py +556 -0
- parrot/bots/db/bigquery.py +602 -0
- parrot/bots/db/cache.py +85 -0
- parrot/bots/db/documentdb.py +668 -0
- parrot/bots/db/elastic.py +1014 -0
- parrot/bots/db/influx.py +898 -0
- parrot/bots/db/mock.py +96 -0
- parrot/bots/db/multi.py +783 -0
- parrot/bots/db/prompts.py +185 -0
- parrot/bots/db/sql.py +1255 -0
- parrot/bots/db/tools.py +212 -0
- parrot/bots/document.py +680 -0
- parrot/bots/hrbot.py +15 -0
- parrot/bots/kb.py +170 -0
- parrot/bots/mcp.py +36 -0
- parrot/bots/orchestration/README.md +463 -0
- parrot/bots/orchestration/__init__.py +1 -0
- parrot/bots/orchestration/agent.py +155 -0
- parrot/bots/orchestration/crew.py +3330 -0
- parrot/bots/orchestration/fsm.py +1179 -0
- parrot/bots/orchestration/hr.py +434 -0
- parrot/bots/orchestration/storage/__init__.py +4 -0
- parrot/bots/orchestration/storage/memory.py +100 -0
- parrot/bots/orchestration/storage/mixin.py +119 -0
- parrot/bots/orchestration/verify.py +202 -0
- parrot/bots/product.py +204 -0
- parrot/bots/prompts/__init__.py +96 -0
- parrot/bots/prompts/agents.py +155 -0
- parrot/bots/prompts/data.py +216 -0
- parrot/bots/prompts/output_generation.py +8 -0
- parrot/bots/scraper/__init__.py +3 -0
- parrot/bots/scraper/models.py +122 -0
- parrot/bots/scraper/scraper.py +1173 -0
- parrot/bots/scraper/templates.py +115 -0
- parrot/bots/stores/__init__.py +5 -0
- parrot/bots/stores/local.py +172 -0
- parrot/bots/webdev.py +81 -0
- parrot/cli.py +17 -0
- parrot/clients/__init__.py +16 -0
- parrot/clients/base.py +1491 -0
- parrot/clients/claude.py +1191 -0
- parrot/clients/factory.py +129 -0
- parrot/clients/google.py +4567 -0
- parrot/clients/gpt.py +1975 -0
- parrot/clients/grok.py +432 -0
- parrot/clients/groq.py +986 -0
- parrot/clients/hf.py +582 -0
- parrot/clients/models.py +18 -0
- parrot/conf.py +395 -0
- parrot/embeddings/__init__.py +9 -0
- parrot/embeddings/base.py +157 -0
- parrot/embeddings/google.py +98 -0
- parrot/embeddings/huggingface.py +74 -0
- parrot/embeddings/openai.py +84 -0
- parrot/embeddings/processor.py +88 -0
- parrot/exceptions.c +13868 -0
- parrot/exceptions.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/exceptions.pxd +22 -0
- parrot/exceptions.pxi +15 -0
- parrot/exceptions.pyx +44 -0
- parrot/generators/__init__.py +29 -0
- parrot/generators/base.py +200 -0
- parrot/generators/html.py +293 -0
- parrot/generators/react.py +205 -0
- parrot/generators/streamlit.py +203 -0
- parrot/generators/template.py +105 -0
- parrot/handlers/__init__.py +4 -0
- parrot/handlers/agent.py +861 -0
- parrot/handlers/agents/__init__.py +1 -0
- parrot/handlers/agents/abstract.py +900 -0
- parrot/handlers/bots.py +338 -0
- parrot/handlers/chat.py +915 -0
- parrot/handlers/creation.sql +192 -0
- parrot/handlers/crew/ARCHITECTURE.md +362 -0
- parrot/handlers/crew/README_BOTMANAGER_PERSISTENCE.md +303 -0
- parrot/handlers/crew/README_REDIS_PERSISTENCE.md +366 -0
- parrot/handlers/crew/__init__.py +0 -0
- parrot/handlers/crew/handler.py +801 -0
- parrot/handlers/crew/models.py +229 -0
- parrot/handlers/crew/redis_persistence.py +523 -0
- parrot/handlers/jobs/__init__.py +10 -0
- parrot/handlers/jobs/job.py +384 -0
- parrot/handlers/jobs/mixin.py +627 -0
- parrot/handlers/jobs/models.py +115 -0
- parrot/handlers/jobs/worker.py +31 -0
- parrot/handlers/models.py +596 -0
- parrot/handlers/o365_auth.py +105 -0
- parrot/handlers/stream.py +337 -0
- parrot/interfaces/__init__.py +6 -0
- parrot/interfaces/aws.py +143 -0
- parrot/interfaces/credentials.py +113 -0
- parrot/interfaces/database.py +27 -0
- parrot/interfaces/google.py +1123 -0
- parrot/interfaces/hierarchy.py +1227 -0
- parrot/interfaces/http.py +651 -0
- parrot/interfaces/images/__init__.py +0 -0
- parrot/interfaces/images/plugins/__init__.py +24 -0
- parrot/interfaces/images/plugins/abstract.py +58 -0
- parrot/interfaces/images/plugins/analisys.py +148 -0
- parrot/interfaces/images/plugins/classify.py +150 -0
- parrot/interfaces/images/plugins/classifybase.py +182 -0
- parrot/interfaces/images/plugins/detect.py +150 -0
- parrot/interfaces/images/plugins/exif.py +1103 -0
- parrot/interfaces/images/plugins/hash.py +52 -0
- parrot/interfaces/images/plugins/vision.py +104 -0
- parrot/interfaces/images/plugins/yolo.py +66 -0
- parrot/interfaces/images/plugins/zerodetect.py +197 -0
- parrot/interfaces/o365.py +978 -0
- parrot/interfaces/onedrive.py +822 -0
- parrot/interfaces/sharepoint.py +1435 -0
- parrot/interfaces/soap.py +257 -0
- parrot/loaders/__init__.py +8 -0
- parrot/loaders/abstract.py +1131 -0
- parrot/loaders/audio.py +199 -0
- parrot/loaders/basepdf.py +53 -0
- parrot/loaders/basevideo.py +1568 -0
- parrot/loaders/csv.py +409 -0
- parrot/loaders/docx.py +116 -0
- parrot/loaders/epubloader.py +316 -0
- parrot/loaders/excel.py +199 -0
- parrot/loaders/factory.py +55 -0
- parrot/loaders/files/__init__.py +0 -0
- parrot/loaders/files/abstract.py +39 -0
- parrot/loaders/files/html.py +26 -0
- parrot/loaders/files/text.py +63 -0
- parrot/loaders/html.py +152 -0
- parrot/loaders/markdown.py +442 -0
- parrot/loaders/pdf.py +373 -0
- parrot/loaders/pdfmark.py +320 -0
- parrot/loaders/pdftables.py +506 -0
- parrot/loaders/ppt.py +476 -0
- parrot/loaders/qa.py +63 -0
- parrot/loaders/splitters/__init__.py +10 -0
- parrot/loaders/splitters/base.py +138 -0
- parrot/loaders/splitters/md.py +228 -0
- parrot/loaders/splitters/token.py +143 -0
- parrot/loaders/txt.py +26 -0
- parrot/loaders/video.py +89 -0
- parrot/loaders/videolocal.py +218 -0
- parrot/loaders/videounderstanding.py +377 -0
- parrot/loaders/vimeo.py +167 -0
- parrot/loaders/web.py +599 -0
- parrot/loaders/youtube.py +504 -0
- parrot/manager/__init__.py +5 -0
- parrot/manager/manager.py +1030 -0
- parrot/mcp/__init__.py +28 -0
- parrot/mcp/adapter.py +105 -0
- parrot/mcp/cli.py +174 -0
- parrot/mcp/client.py +119 -0
- parrot/mcp/config.py +75 -0
- parrot/mcp/integration.py +842 -0
- parrot/mcp/oauth.py +933 -0
- parrot/mcp/server.py +225 -0
- parrot/mcp/transports/__init__.py +3 -0
- parrot/mcp/transports/base.py +279 -0
- parrot/mcp/transports/grpc_session.py +163 -0
- parrot/mcp/transports/http.py +312 -0
- parrot/mcp/transports/mcp.proto +108 -0
- parrot/mcp/transports/quic.py +1082 -0
- parrot/mcp/transports/sse.py +330 -0
- parrot/mcp/transports/stdio.py +309 -0
- parrot/mcp/transports/unix.py +395 -0
- parrot/mcp/transports/websocket.py +547 -0
- parrot/memory/__init__.py +16 -0
- parrot/memory/abstract.py +209 -0
- parrot/memory/agent.py +32 -0
- parrot/memory/cache.py +175 -0
- parrot/memory/core.py +555 -0
- parrot/memory/file.py +153 -0
- parrot/memory/mem.py +131 -0
- parrot/memory/redis.py +613 -0
- parrot/models/__init__.py +46 -0
- parrot/models/basic.py +118 -0
- parrot/models/compliance.py +208 -0
- parrot/models/crew.py +395 -0
- parrot/models/detections.py +654 -0
- parrot/models/generation.py +85 -0
- parrot/models/google.py +223 -0
- parrot/models/groq.py +23 -0
- parrot/models/openai.py +30 -0
- parrot/models/outputs.py +285 -0
- parrot/models/responses.py +938 -0
- parrot/notifications/__init__.py +743 -0
- parrot/openapi/__init__.py +3 -0
- parrot/openapi/components.yaml +641 -0
- parrot/openapi/config.py +322 -0
- parrot/outputs/__init__.py +32 -0
- parrot/outputs/formats/__init__.py +108 -0
- parrot/outputs/formats/altair.py +359 -0
- parrot/outputs/formats/application.py +122 -0
- parrot/outputs/formats/base.py +351 -0
- parrot/outputs/formats/bokeh.py +356 -0
- parrot/outputs/formats/card.py +424 -0
- parrot/outputs/formats/chart.py +436 -0
- parrot/outputs/formats/d3.py +255 -0
- parrot/outputs/formats/echarts.py +310 -0
- parrot/outputs/formats/generators/__init__.py +0 -0
- parrot/outputs/formats/generators/abstract.py +61 -0
- parrot/outputs/formats/generators/panel.py +145 -0
- parrot/outputs/formats/generators/streamlit.py +86 -0
- parrot/outputs/formats/generators/terminal.py +63 -0
- parrot/outputs/formats/holoviews.py +310 -0
- parrot/outputs/formats/html.py +147 -0
- parrot/outputs/formats/jinja2.py +46 -0
- parrot/outputs/formats/json.py +87 -0
- parrot/outputs/formats/map.py +933 -0
- parrot/outputs/formats/markdown.py +172 -0
- parrot/outputs/formats/matplotlib.py +237 -0
- parrot/outputs/formats/mixins/__init__.py +0 -0
- parrot/outputs/formats/mixins/emaps.py +855 -0
- parrot/outputs/formats/plotly.py +341 -0
- parrot/outputs/formats/seaborn.py +310 -0
- parrot/outputs/formats/table.py +397 -0
- parrot/outputs/formats/template_report.py +138 -0
- parrot/outputs/formats/yaml.py +125 -0
- parrot/outputs/formatter.py +152 -0
- parrot/outputs/templates/__init__.py +95 -0
- parrot/pipelines/__init__.py +0 -0
- parrot/pipelines/abstract.py +210 -0
- parrot/pipelines/detector.py +124 -0
- parrot/pipelines/models.py +90 -0
- parrot/pipelines/planogram.py +3002 -0
- parrot/pipelines/table.sql +97 -0
- parrot/plugins/__init__.py +106 -0
- parrot/plugins/importer.py +80 -0
- parrot/py.typed +0 -0
- parrot/registry/__init__.py +18 -0
- parrot/registry/registry.py +594 -0
- parrot/scheduler/__init__.py +1189 -0
- parrot/scheduler/models.py +60 -0
- parrot/security/__init__.py +16 -0
- parrot/security/prompt_injection.py +268 -0
- parrot/security/security_events.sql +25 -0
- parrot/services/__init__.py +1 -0
- parrot/services/mcp/__init__.py +8 -0
- parrot/services/mcp/config.py +13 -0
- parrot/services/mcp/server.py +295 -0
- parrot/services/o365_remote_auth.py +235 -0
- parrot/stores/__init__.py +7 -0
- parrot/stores/abstract.py +352 -0
- parrot/stores/arango.py +1090 -0
- parrot/stores/bigquery.py +1377 -0
- parrot/stores/cache.py +106 -0
- parrot/stores/empty.py +10 -0
- parrot/stores/faiss_store.py +1157 -0
- parrot/stores/kb/__init__.py +9 -0
- parrot/stores/kb/abstract.py +68 -0
- parrot/stores/kb/cache.py +165 -0
- parrot/stores/kb/doc.py +325 -0
- parrot/stores/kb/hierarchy.py +346 -0
- parrot/stores/kb/local.py +457 -0
- parrot/stores/kb/prompt.py +28 -0
- parrot/stores/kb/redis.py +659 -0
- parrot/stores/kb/store.py +115 -0
- parrot/stores/kb/user.py +374 -0
- parrot/stores/models.py +59 -0
- parrot/stores/pgvector.py +3 -0
- parrot/stores/postgres.py +2853 -0
- parrot/stores/utils/__init__.py +0 -0
- parrot/stores/utils/chunking.py +197 -0
- parrot/telemetry/__init__.py +3 -0
- parrot/telemetry/mixin.py +111 -0
- parrot/template/__init__.py +3 -0
- parrot/template/engine.py +259 -0
- parrot/tools/__init__.py +23 -0
- parrot/tools/abstract.py +644 -0
- parrot/tools/agent.py +363 -0
- parrot/tools/arangodbsearch.py +537 -0
- parrot/tools/arxiv_tool.py +188 -0
- parrot/tools/calculator/__init__.py +3 -0
- parrot/tools/calculator/operations/__init__.py +38 -0
- parrot/tools/calculator/operations/calculus.py +80 -0
- parrot/tools/calculator/operations/statistics.py +76 -0
- parrot/tools/calculator/tool.py +150 -0
- parrot/tools/cloudwatch.py +988 -0
- parrot/tools/codeinterpreter/__init__.py +127 -0
- parrot/tools/codeinterpreter/executor.py +371 -0
- parrot/tools/codeinterpreter/internals.py +473 -0
- parrot/tools/codeinterpreter/models.py +643 -0
- parrot/tools/codeinterpreter/prompts.py +224 -0
- parrot/tools/codeinterpreter/tool.py +664 -0
- parrot/tools/company_info/__init__.py +6 -0
- parrot/tools/company_info/tool.py +1138 -0
- parrot/tools/correlationanalysis.py +437 -0
- parrot/tools/database/abstract.py +286 -0
- parrot/tools/database/bq.py +115 -0
- parrot/tools/database/cache.py +284 -0
- parrot/tools/database/models.py +95 -0
- parrot/tools/database/pg.py +343 -0
- parrot/tools/databasequery.py +1159 -0
- parrot/tools/db.py +1800 -0
- parrot/tools/ddgo.py +370 -0
- parrot/tools/decorators.py +271 -0
- parrot/tools/dftohtml.py +282 -0
- parrot/tools/document.py +549 -0
- parrot/tools/ecs.py +819 -0
- parrot/tools/edareport.py +368 -0
- parrot/tools/elasticsearch.py +1049 -0
- parrot/tools/employees.py +462 -0
- parrot/tools/epson/__init__.py +96 -0
- parrot/tools/excel.py +683 -0
- parrot/tools/file/__init__.py +13 -0
- parrot/tools/file/abstract.py +76 -0
- parrot/tools/file/gcs.py +378 -0
- parrot/tools/file/local.py +284 -0
- parrot/tools/file/s3.py +511 -0
- parrot/tools/file/tmp.py +309 -0
- parrot/tools/file/tool.py +501 -0
- parrot/tools/file_reader.py +129 -0
- parrot/tools/flowtask/__init__.py +19 -0
- parrot/tools/flowtask/tool.py +761 -0
- parrot/tools/gittoolkit.py +508 -0
- parrot/tools/google/__init__.py +18 -0
- parrot/tools/google/base.py +169 -0
- parrot/tools/google/tools.py +1251 -0
- parrot/tools/googlelocation.py +5 -0
- parrot/tools/googleroutes.py +5 -0
- parrot/tools/googlesearch.py +5 -0
- parrot/tools/googlesitesearch.py +5 -0
- parrot/tools/googlevoice.py +2 -0
- parrot/tools/gvoice.py +695 -0
- parrot/tools/ibisworld/README.md +225 -0
- parrot/tools/ibisworld/__init__.py +11 -0
- parrot/tools/ibisworld/tool.py +366 -0
- parrot/tools/jiratoolkit.py +1718 -0
- parrot/tools/manager.py +1098 -0
- parrot/tools/math.py +152 -0
- parrot/tools/metadata.py +476 -0
- parrot/tools/msteams.py +1621 -0
- parrot/tools/msword.py +635 -0
- parrot/tools/multidb.py +580 -0
- parrot/tools/multistoresearch.py +369 -0
- parrot/tools/networkninja.py +167 -0
- parrot/tools/nextstop/__init__.py +4 -0
- parrot/tools/nextstop/base.py +286 -0
- parrot/tools/nextstop/employee.py +733 -0
- parrot/tools/nextstop/store.py +462 -0
- parrot/tools/notification.py +435 -0
- parrot/tools/o365/__init__.py +42 -0
- parrot/tools/o365/base.py +295 -0
- parrot/tools/o365/bundle.py +522 -0
- parrot/tools/o365/events.py +554 -0
- parrot/tools/o365/mail.py +992 -0
- parrot/tools/o365/onedrive.py +497 -0
- parrot/tools/o365/sharepoint.py +641 -0
- parrot/tools/openapi_toolkit.py +904 -0
- parrot/tools/openweather.py +527 -0
- parrot/tools/pdfprint.py +1001 -0
- parrot/tools/powerbi.py +518 -0
- parrot/tools/powerpoint.py +1113 -0
- parrot/tools/pricestool.py +146 -0
- parrot/tools/products/__init__.py +246 -0
- parrot/tools/prophet_tool.py +171 -0
- parrot/tools/pythonpandas.py +630 -0
- parrot/tools/pythonrepl.py +910 -0
- parrot/tools/qsource.py +436 -0
- parrot/tools/querytoolkit.py +395 -0
- parrot/tools/quickeda.py +827 -0
- parrot/tools/resttool.py +553 -0
- parrot/tools/retail/__init__.py +0 -0
- parrot/tools/retail/bby.py +528 -0
- parrot/tools/sandboxtool.py +703 -0
- parrot/tools/sassie/__init__.py +352 -0
- parrot/tools/scraping/__init__.py +7 -0
- parrot/tools/scraping/docs/select.md +466 -0
- parrot/tools/scraping/documentation.md +1278 -0
- parrot/tools/scraping/driver.py +436 -0
- parrot/tools/scraping/models.py +576 -0
- parrot/tools/scraping/options.py +85 -0
- parrot/tools/scraping/orchestrator.py +517 -0
- parrot/tools/scraping/readme.md +740 -0
- parrot/tools/scraping/tool.py +3115 -0
- parrot/tools/seasonaldetection.py +642 -0
- parrot/tools/shell_tool/__init__.py +5 -0
- parrot/tools/shell_tool/actions.py +408 -0
- parrot/tools/shell_tool/engine.py +155 -0
- parrot/tools/shell_tool/models.py +322 -0
- parrot/tools/shell_tool/tool.py +442 -0
- parrot/tools/site_search.py +214 -0
- parrot/tools/textfile.py +418 -0
- parrot/tools/think.py +378 -0
- parrot/tools/toolkit.py +298 -0
- parrot/tools/webapp_tool.py +187 -0
- parrot/tools/whatif.py +1279 -0
- parrot/tools/workday/MULTI_WSDL_EXAMPLE.md +249 -0
- parrot/tools/workday/__init__.py +6 -0
- parrot/tools/workday/models.py +1389 -0
- parrot/tools/workday/tool.py +1293 -0
- parrot/tools/yfinance_tool.py +306 -0
- parrot/tools/zipcode.py +217 -0
- parrot/utils/__init__.py +2 -0
- parrot/utils/helpers.py +73 -0
- parrot/utils/parsers/__init__.py +5 -0
- parrot/utils/parsers/toml.c +12078 -0
- parrot/utils/parsers/toml.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/parsers/toml.pyx +21 -0
- parrot/utils/toml.py +11 -0
- parrot/utils/types.cpp +20936 -0
- parrot/utils/types.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/types.pyx +213 -0
- parrot/utils/uv.py +11 -0
- parrot/version.py +10 -0
- parrot/yaml-rs/Cargo.lock +350 -0
- parrot/yaml-rs/Cargo.toml +19 -0
- parrot/yaml-rs/pyproject.toml +19 -0
- parrot/yaml-rs/python/yaml_rs/__init__.py +81 -0
- parrot/yaml-rs/src/lib.rs +222 -0
- requirements/docker-compose.yml +24 -0
- requirements/requirements-dev.txt +21 -0
parrot/clients/claude.py
ADDED
|
@@ -0,0 +1,1191 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from typing import AsyncIterator, Dict, List, Optional, Union, Any
|
|
3
|
+
from typing import List as TypingList
|
|
4
|
+
import base64
|
|
5
|
+
import io
|
|
6
|
+
import time
|
|
7
|
+
from enum import Enum
|
|
8
|
+
import uuid
|
|
9
|
+
import logging
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
import mimetypes
|
|
12
|
+
from PIL import Image
|
|
13
|
+
from pydantic import BaseModel, Field
|
|
14
|
+
from navconfig import config
|
|
15
|
+
from datamodel.exceptions import ParserError # pylint: disable=E0611 # noqa
|
|
16
|
+
from datamodel.parsers.json import json_decoder # pylint: disable=E0611 # noqa
|
|
17
|
+
from anthropic import AsyncAnthropic
|
|
18
|
+
from anthropic.types import Message, MessageStreamEvent
|
|
19
|
+
from .base import AbstractClient, BatchRequest, StreamingRetryConfig
|
|
20
|
+
from ..models import (
|
|
21
|
+
AIMessage,
|
|
22
|
+
AIMessageFactory,
|
|
23
|
+
ToolCall,
|
|
24
|
+
OutputFormat,
|
|
25
|
+
StructuredOutputConfig,
|
|
26
|
+
ObjectDetectionResult
|
|
27
|
+
)
|
|
28
|
+
from ..models.outputs import (
|
|
29
|
+
SentimentAnalysis,
|
|
30
|
+
ProductReview
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
logging.getLogger("anthropic").setLevel(logging.WARNING)
|
|
34
|
+
|
|
35
|
+
class ClaudeModel(Enum):
|
|
36
|
+
"""Enum for Claude models."""
|
|
37
|
+
SONNET_4 = "claude-sonnet-4-20250514"
|
|
38
|
+
SONNET_4_5 = "claude-sonnet-4-5"
|
|
39
|
+
OPUS_4 = "claude-opus-4-20241022"
|
|
40
|
+
OPUS_4_1 = "claude-opus-4-1"
|
|
41
|
+
SONNET_3_5 = "claude-3-5-sonnet-20241022"
|
|
42
|
+
HAIKU_3_5 = "claude-3-5-haiku-20241022"
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class AnthropicClient(AbstractClient):
|
|
46
|
+
"""Client for interacting with the Anthropic API using the official SDK."""
|
|
47
|
+
version: str = "2023-06-01"
|
|
48
|
+
client_type: str = "anthropic"
|
|
49
|
+
client_name: str = "claude"
|
|
50
|
+
use_session: bool = False
|
|
51
|
+
_default_model: str = 'claude-sonnet-4-5'
|
|
52
|
+
|
|
53
|
+
def __init__(
|
|
54
|
+
self,
|
|
55
|
+
api_key: str = None,
|
|
56
|
+
base_url: str = "https://api.anthropic.com",
|
|
57
|
+
**kwargs
|
|
58
|
+
):
|
|
59
|
+
self.api_key = api_key or config.get('ANTHROPIC_API_KEY')
|
|
60
|
+
self.base_url = base_url
|
|
61
|
+
self.client: Optional[AsyncAnthropic] = None
|
|
62
|
+
self.base_headers = {
|
|
63
|
+
"Content-Type": "application/json",
|
|
64
|
+
"x-api-key": self.api_key,
|
|
65
|
+
"anthropic-version": self.version
|
|
66
|
+
}
|
|
67
|
+
super().__init__(**kwargs)
|
|
68
|
+
|
|
69
|
+
async def get_client(self) -> AsyncAnthropic:
|
|
70
|
+
"""Initialize the Anthropic client."""
|
|
71
|
+
return AsyncAnthropic(
|
|
72
|
+
api_key=self.api_key,
|
|
73
|
+
max_retries=2
|
|
74
|
+
)
|
|
75
|
+
|
|
76
|
+
async def ask(
|
|
77
|
+
self,
|
|
78
|
+
prompt: str,
|
|
79
|
+
model: Union[Enum, str] = None,
|
|
80
|
+
max_tokens: Optional[int] = None,
|
|
81
|
+
temperature: Optional[float] = None,
|
|
82
|
+
files: Optional[List[Union[str, Path]]] = None,
|
|
83
|
+
system_prompt: Optional[str] = None,
|
|
84
|
+
structured_output: Union[type, StructuredOutputConfig, None] = None,
|
|
85
|
+
user_id: Optional[str] = None,
|
|
86
|
+
session_id: Optional[str] = None,
|
|
87
|
+
tools: Optional[List[Dict[str, Any]]] = None,
|
|
88
|
+
use_tools: Optional[bool] = None,
|
|
89
|
+
deep_research: bool = False,
|
|
90
|
+
background: bool = False,
|
|
91
|
+
lazy_loading: bool = False,
|
|
92
|
+
) -> AIMessage:
|
|
93
|
+
"""Ask Claude a question with optional conversation memory.
|
|
94
|
+
|
|
95
|
+
Args:
|
|
96
|
+
use_tools: If None, uses instance default. If True/False, overrides for this call.
|
|
97
|
+
deep_research: If True, use enhanced system prompt for thorough research
|
|
98
|
+
background: If True, execute research in background mode (not yet supported)
|
|
99
|
+
lazy_loading: If True, enable dynamic tool searching
|
|
100
|
+
"""
|
|
101
|
+
if not self.client:
|
|
102
|
+
raise RuntimeError("Client not initialized. Use async context manager.")
|
|
103
|
+
|
|
104
|
+
# If use_tools is None, use the instance default
|
|
105
|
+
_use_tools = use_tools if use_tools is not None else self.enable_tools
|
|
106
|
+
|
|
107
|
+
# For deep research, automatically enable tools
|
|
108
|
+
if deep_research:
|
|
109
|
+
_use_tools = True
|
|
110
|
+
self.logger.info("Deep research mode enabled: activating enhanced research prompt and tools")
|
|
111
|
+
|
|
112
|
+
model = (model.value if isinstance(model, ClaudeModel) else model) or (self.model or self.default_model)
|
|
113
|
+
# Generate unique turn ID for tracking
|
|
114
|
+
turn_id = str(uuid.uuid4())
|
|
115
|
+
original_prompt = prompt
|
|
116
|
+
|
|
117
|
+
messages, conversation_history, system_prompt = await self._prepare_conversation_context(
|
|
118
|
+
prompt, files, user_id, session_id, system_prompt
|
|
119
|
+
)
|
|
120
|
+
|
|
121
|
+
# Enhance system prompt for deep research mode
|
|
122
|
+
if deep_research:
|
|
123
|
+
research_prompt = self._get_deep_research_system_prompt()
|
|
124
|
+
system_prompt = (
|
|
125
|
+
f"{system_prompt}\n\n{research_prompt}"
|
|
126
|
+
if system_prompt
|
|
127
|
+
else research_prompt
|
|
128
|
+
)
|
|
129
|
+
|
|
130
|
+
# Lazy loading system prompt
|
|
131
|
+
if lazy_loading:
|
|
132
|
+
search_prompt = "You have access to a library of tools. Use the 'search_tools' function to find relevant tools."
|
|
133
|
+
system_prompt = (
|
|
134
|
+
f"{system_prompt}\n\n{search_prompt}"
|
|
135
|
+
if system_prompt
|
|
136
|
+
else search_prompt
|
|
137
|
+
)
|
|
138
|
+
|
|
139
|
+
output_config = self._get_structured_config(
|
|
140
|
+
structured_output
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
if structured_output:
|
|
144
|
+
schema_instruction = output_config.format_schema_instruction()
|
|
145
|
+
system_prompt = (
|
|
146
|
+
f"{system_prompt}\n\n{schema_instruction}"
|
|
147
|
+
if system_prompt
|
|
148
|
+
else schema_instruction
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
payload = {
|
|
152
|
+
"model": model,
|
|
153
|
+
"max_tokens": max_tokens or self.max_tokens,
|
|
154
|
+
"temperature": temperature or self.temperature,
|
|
155
|
+
"messages": messages
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
if system_prompt:
|
|
159
|
+
payload["system"] = system_prompt
|
|
160
|
+
|
|
161
|
+
if _use_tools and (tools and isinstance(tools, list)):
|
|
162
|
+
for tool in tools:
|
|
163
|
+
self.register_tool(tool)
|
|
164
|
+
|
|
165
|
+
# LAZY LOADING LOGIC
|
|
166
|
+
active_tool_names = set()
|
|
167
|
+
|
|
168
|
+
if _use_tools:
|
|
169
|
+
if lazy_loading:
|
|
170
|
+
prepared = self._prepare_lazy_tools()
|
|
171
|
+
if prepared:
|
|
172
|
+
payload["tools"] = prepared
|
|
173
|
+
active_tool_names.add("search_tools")
|
|
174
|
+
else:
|
|
175
|
+
payload["tools"] = self._prepare_tools()
|
|
176
|
+
|
|
177
|
+
# Track tool calls for the response
|
|
178
|
+
all_tool_calls = []
|
|
179
|
+
|
|
180
|
+
# Handle tool calls in a loop
|
|
181
|
+
while True:
|
|
182
|
+
# Use the Anthropic SDK to create messages
|
|
183
|
+
response = await self.client.messages.create(**payload)
|
|
184
|
+
# Convert Message object to dict for compatibility
|
|
185
|
+
result = response.model_dump()
|
|
186
|
+
|
|
187
|
+
# Check if Claude wants to use a tool
|
|
188
|
+
if result.get("stop_reason") == "tool_use":
|
|
189
|
+
tool_results = []
|
|
190
|
+
found_new_tools = False
|
|
191
|
+
|
|
192
|
+
for content_block in result["content"]:
|
|
193
|
+
if content_block["type"] == "tool_use":
|
|
194
|
+
tool_name = content_block["name"]
|
|
195
|
+
tool_input = content_block["input"]
|
|
196
|
+
tool_id = content_block["id"]
|
|
197
|
+
|
|
198
|
+
# Create ToolCall object and execute
|
|
199
|
+
tc = ToolCall(
|
|
200
|
+
id=tool_id,
|
|
201
|
+
name=tool_name,
|
|
202
|
+
arguments=tool_input
|
|
203
|
+
)
|
|
204
|
+
|
|
205
|
+
try:
|
|
206
|
+
start_time = time.time()
|
|
207
|
+
tool_result = await self._execute_tool(tool_name, tool_input)
|
|
208
|
+
execution_time = time.time() - start_time
|
|
209
|
+
|
|
210
|
+
# Lazy Loading Check
|
|
211
|
+
if lazy_loading and tool_name == "search_tools":
|
|
212
|
+
new_tools = self._check_new_tools(tool_name, str(tool_result))
|
|
213
|
+
if new_tools:
|
|
214
|
+
for nt in new_tools:
|
|
215
|
+
if nt not in active_tool_names:
|
|
216
|
+
active_tool_names.add(nt)
|
|
217
|
+
found_new_tools = True
|
|
218
|
+
|
|
219
|
+
tc.result = tool_result
|
|
220
|
+
tc.execution_time = execution_time
|
|
221
|
+
|
|
222
|
+
tool_results.append({
|
|
223
|
+
"type": "tool_result",
|
|
224
|
+
"tool_use_id": tool_id,
|
|
225
|
+
"content": str(tool_result)
|
|
226
|
+
})
|
|
227
|
+
except Exception as e:
|
|
228
|
+
tc.error = str(e)
|
|
229
|
+
tool_results.append({
|
|
230
|
+
"type": "tool_result",
|
|
231
|
+
"tool_use_id": tool_id,
|
|
232
|
+
"is_error": True,
|
|
233
|
+
"content": str(e)
|
|
234
|
+
})
|
|
235
|
+
|
|
236
|
+
all_tool_calls.append(tc)
|
|
237
|
+
|
|
238
|
+
# Update available tools if new ones found
|
|
239
|
+
if lazy_loading and found_new_tools:
|
|
240
|
+
payload["tools"] = self._prepare_tools(filter_names=list(active_tool_names))
|
|
241
|
+
|
|
242
|
+
# Add tool results and continue conversation
|
|
243
|
+
messages.append({"role": "assistant", "content": result["content"]})
|
|
244
|
+
messages.append({"role": "user", "content": tool_results})
|
|
245
|
+
payload["messages"] = messages
|
|
246
|
+
else:
|
|
247
|
+
# No more tool calls, assistant response final
|
|
248
|
+
messages.append({"role": "assistant", "content": result["content"]})
|
|
249
|
+
break
|
|
250
|
+
|
|
251
|
+
# Handle structured output
|
|
252
|
+
final_output = None
|
|
253
|
+
if structured_output:
|
|
254
|
+
# Extract text content from Claude's response
|
|
255
|
+
text_content = "".join(
|
|
256
|
+
content_block["text"]
|
|
257
|
+
for content_block in result["content"]
|
|
258
|
+
if content_block["type"] == "text"
|
|
259
|
+
)
|
|
260
|
+
try:
|
|
261
|
+
if output_config.custom_parser:
|
|
262
|
+
final_output = await output_config.custom_parser(
|
|
263
|
+
text_content
|
|
264
|
+
)
|
|
265
|
+
final_output = await self._parse_structured_output(
|
|
266
|
+
text_content,
|
|
267
|
+
output_config
|
|
268
|
+
)
|
|
269
|
+
except Exception:
|
|
270
|
+
final_output = text_content
|
|
271
|
+
|
|
272
|
+
# Extract assistant response text for conversation memory
|
|
273
|
+
assistant_response_text = "".join(
|
|
274
|
+
content_block.get("text", "")
|
|
275
|
+
for content_block in result.get("content", [])
|
|
276
|
+
if content_block.get("type") == "text"
|
|
277
|
+
)
|
|
278
|
+
|
|
279
|
+
# Update conversation memory with unified system
|
|
280
|
+
tools_used = [tc.name for tc in all_tool_calls]
|
|
281
|
+
await self._update_conversation_memory(
|
|
282
|
+
user_id,
|
|
283
|
+
session_id,
|
|
284
|
+
conversation_history,
|
|
285
|
+
messages,
|
|
286
|
+
system_prompt,
|
|
287
|
+
turn_id,
|
|
288
|
+
original_prompt,
|
|
289
|
+
assistant_response_text,
|
|
290
|
+
tools_used
|
|
291
|
+
)
|
|
292
|
+
|
|
293
|
+
# Create AIMessage using factory
|
|
294
|
+
return AIMessageFactory.from_claude(
|
|
295
|
+
response=result,
|
|
296
|
+
input_text=original_prompt,
|
|
297
|
+
model=model,
|
|
298
|
+
user_id=user_id,
|
|
299
|
+
session_id=session_id,
|
|
300
|
+
turn_id=turn_id,
|
|
301
|
+
structured_output=final_output,
|
|
302
|
+
tool_calls=all_tool_calls
|
|
303
|
+
)
|
|
304
|
+
|
|
305
|
+
async def ask_stream(
    self,
    prompt: str,
    model: Union[ClaudeModel, str] = None,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    files: Optional[List[Union[str, Path]]] = None,
    system_prompt: Optional[str] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    retry_config: Optional[StreamingRetryConfig] = None,
    on_max_tokens: Optional[str] = "retry",  # "retry", "notify", "ignore"
    tools: Optional[List[Dict[str, Any]]] = None,
    deep_research: bool = False,
    agent_config: Optional[Dict[str, Any]] = None,
    lazy_loading: bool = False,
) -> AsyncIterator[str]:
    """Stream Claude's response as incremental text chunks with retry handling.

    Args:
        prompt: The user prompt to send.
        model: Claude model (enum or raw string); falls back to the instance default.
        max_tokens: Token budget for the response; defaults to ``self.max_tokens``.
        temperature: Sampling temperature; defaults to ``self.temperature``.
        files: Optional attachments folded into the conversation context.
        system_prompt: Optional system prompt (extended in deep-research mode).
        user_id: User identifier for conversation memory.
        session_id: Session identifier for conversation memory.
        retry_config: Streaming retry policy; a default is created when omitted.
        on_max_tokens: Policy when the stream stops at the token limit:
            "retry" (grow the limit and retry), "notify" (emit a warning chunk),
            or "ignore" (keep the truncated output as-is).
        tools: Optional tool definitions registered before streaming.
        deep_research: If True, use enhanced system prompt for thorough research.
        agent_config: Optional configuration (not used, for interface compatibility).
        lazy_loading: Unused here; kept for interface compatibility.

    Yields:
        str: Text chunks as they arrive, plus markdown-formatted status
        notices on retries and unrecoverable errors.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # Generate unique turn ID for tracking this exchange in memory.
    turn_id = str(uuid.uuid4())
    original_prompt = prompt

    # Default retry configuration when the caller did not supply one.
    if retry_config is None:
        retry_config = StreamingRetryConfig()

    messages, conversation_history, system_prompt = await self._prepare_conversation_context(
        prompt, files, user_id, session_id, system_prompt
    )

    # Enhance system prompt for deep research mode.
    if deep_research:
        research_prompt = self._get_deep_research_system_prompt()
        system_prompt = (
            f"{system_prompt}\n\n{research_prompt}"
            if system_prompt
            else research_prompt
        )
        self.logger.info("Deep research mode enabled for streaming")

    # Register any caller-supplied tool definitions.
    if tools and isinstance(tools, list):
        for tool in tools:
            self.register_tool(tool)

    current_max_tokens = max_tokens or self.max_tokens
    retry_count = 0
    assistant_content = ""
    model = (
        model.value if isinstance(model, ClaudeModel) else model
    ) or (self.model or self.default_model)
    while retry_count <= retry_config.max_retries:
        try:
            payload = {
                "model": model,
                "max_tokens": current_max_tokens,
                "temperature": temperature or self.temperature,
                "messages": messages
            }

            if system_prompt:
                payload["system"] = system_prompt

            # Tools are always attached; the text stream below only surfaces
            # text deltas, so tool-use blocks are not executed in this path.
            payload["tools"] = self._prepare_tools()

            assistant_content = ""
            max_tokens_reached = False
            stop_reason = None

            try:
                # Use the Anthropic SDK's streaming API.
                async with self.client.messages.stream(**payload) as stream:
                    async for text in stream.text_stream:
                        assistant_content += text
                        yield text

                    # Get the final message to check the stop reason.
                    final_message = await stream.get_final_message()
                    stop_reason = final_message.stop_reason
                    if stop_reason == 'max_tokens':
                        max_tokens_reached = True

            except Exception as e:
                # Handle rate limits and server errors surfaced mid-stream.
                error_str = str(e).lower()
                if '429' in error_str or 'rate limit' in error_str:
                    if retry_config.retry_on_rate_limit and retry_count < retry_config.max_retries:
                        yield f"\n\n⚠️ **Rate limited (attempt {retry_count + 1}). Retrying...**\n\n"
                        retry_count += 1
                        await self._wait_with_backoff(retry_count, retry_config)
                        continue
                    else:
                        yield f"\n\n❌ **Rate limit exceeded. Max retries reached.**\n"
                        break
                elif '5' in error_str[:3]:  # 5xx errors
                    if retry_config.retry_on_server_error and retry_count < retry_config.max_retries:
                        yield f"\n\n⚠️ **Server error (attempt {retry_count + 1}). Retrying...**\n\n"
                        retry_count += 1
                        await self._wait_with_backoff(retry_count, retry_config)
                        continue
                    else:
                        yield f"\n\n❌ **Server error. Max retries reached.**\n"
                        break
                else:
                    raise
            # Check if we reached max tokens.
            if max_tokens_reached:
                if on_max_tokens == "notify":
                    yield f"\n\n⚠️ **Response truncated due to token limit ({current_max_tokens} tokens). The response may be incomplete.**\n"
                elif on_max_tokens == "retry" and retry_config.auto_retry_on_max_tokens:
                    if retry_count < retry_config.max_retries:
                        # Increase token limit for retry.
                        new_max_tokens = int(current_max_tokens * retry_config.token_increase_factor)

                        # Notify user about retry.
                        yield f"\n\n🔄 **Response reached token limit ({current_max_tokens}). Retrying with increased limit ({new_max_tokens})...**\n\n"

                        current_max_tokens = new_max_tokens
                        retry_count += 1

                        # Wait before retry.
                        await self._wait_with_backoff(retry_count, retry_config)
                        continue
                    else:
                        # Max retries reached.
                        yield f"\n\n❌ **Maximum retries reached. Response may be incomplete due to token limits.**\n"
                elif on_max_tokens == "ignore":
                    # Keep the truncated output as-is and fall through to the
                    # success break below.
                    # BUG FIX: this branch previously used `continue`, which
                    # restarted the while-loop without incrementing
                    # retry_count and re-issued the streaming request
                    # indefinitely instead of ignoring the truncation.
                    pass
            # If we get here, streaming completed successfully.
            break
        except Exception as e:
            if retry_count < retry_config.max_retries:
                error_msg = f"\n\n⚠️ **Streaming error (attempt {retry_count + 1}): {str(e)}. Retrying...**\n\n"
                yield error_msg

                retry_count += 1
                await self._wait_with_backoff(retry_count, retry_config)
                continue
            else:
                # Max retries reached, yield error and break.
                yield f"\n\n❌ **Streaming failed after {retry_config.max_retries} retries: {str(e)}**\n"
                break

    # Persist the exchange in conversation memory (only if anything streamed).
    if assistant_content:
        await self._update_conversation_memory(
            user_id,
            session_id,
            conversation_history,
            messages + [{
                "role": "assistant",
                "content": [{"type": "text", "text": assistant_content}]
            }],
            system_prompt,
            turn_id,
            original_prompt,
            assistant_content,
            []  # No tools used in streaming
        )
|
|
471
|
+
|
|
472
|
+
async def batch_ask(self, requests: List[BatchRequest]) -> List[AIMessage]:
    """Submit multiple requests through the Message Batches API.

    Creates a batch, polls until processing finishes, downloads the JSONL
    results and converts each entry into an :class:`AIMessage`.

    Args:
        requests: Batch entries, each carrying a ``custom_id`` and ``params``.

    Returns:
        List[AIMessage]: One message per batch result (raw dicts are kept
        for entries that do not match the expected response shape).

    Raises:
        RuntimeError: If the client is uninitialized, the batch fails or is
            canceled, or no results URL is published.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # Shape the entries into the payload format the batches endpoint expects.
    batch_payload = {
        "requests": [
            {"custom_id": req.custom_id, "params": req.params}
            for req in requests
        ]
    }

    # Submit the batch via the SDK and remember its identifier.
    batch = await self.client.messages.batches.create(**batch_payload)
    batch_id = batch.id

    # Poll every 5 seconds until the batch reaches a terminal state.
    while True:
        batch_status = await self.client.messages.batches.retrieve(batch_id)
        if batch_status.processing_status == "ended":
            break
        if batch_status.processing_status in ("failed", "canceled"):
            raise RuntimeError(f"Batch processing failed: {batch_status}")
        await asyncio.sleep(5)

    results_url = batch_status.results_url
    if not results_url:
        raise RuntimeError("No results URL provided in batch status")

    # Download the JSONL results. The SDK may not expose a direct download,
    # so an HTTP session is used; fall back to a temporary one when needed.
    if self.session:
        async with self.session.get(results_url) as response:
            response.raise_for_status()
            results_text = await response.text()
    else:
        import aiohttp
        async with aiohttp.ClientSession() as temp_session:
            async with temp_session.get(results_url) as response:
                response.raise_for_status()
                results_text = await response.text()

    # Decode each JSONL line and wrap well-formed entries as AIMessages.
    results = []
    for line in results_text.strip().split('\n'):
        if not line:
            continue
        batch_result = json_decoder(line)
        if 'response' in batch_result and 'body' in batch_result['response']:
            claude_response = batch_result['response']['body']
            results.append(
                AIMessageFactory.from_claude(
                    response=claude_response,
                    input_text="Batch request",
                    model=claude_response.get('model', 'unknown'),
                    turn_id=str(uuid.uuid4())
                )
            )
        else:
            # Unexpected shape: hand back the raw decoded entry.
            results.append(batch_result)

    return results
|
|
542
|
+
|
|
543
|
+
def _encode_image_for_claude(
|
|
544
|
+
self,
|
|
545
|
+
image: Union[Path, bytes, Image.Image]
|
|
546
|
+
) -> Dict[str, Any]:
|
|
547
|
+
"""Encode image for Claude's vision API."""
|
|
548
|
+
|
|
549
|
+
if isinstance(image, Path):
|
|
550
|
+
if not image.exists():
|
|
551
|
+
raise FileNotFoundError(f"Image file not found: {image}")
|
|
552
|
+
|
|
553
|
+
# Get mime type
|
|
554
|
+
mime_type, _ = mimetypes.guess_type(str(image))
|
|
555
|
+
if not mime_type or not mime_type.startswith('image/'):
|
|
556
|
+
mime_type = "image/jpeg" # Default fallback
|
|
557
|
+
|
|
558
|
+
# Read and encode the file
|
|
559
|
+
with open(image, "rb") as f:
|
|
560
|
+
encoded_data = base64.b64encode(f.read()).decode('utf-8')
|
|
561
|
+
|
|
562
|
+
elif isinstance(image, bytes):
|
|
563
|
+
# Handle raw bytes
|
|
564
|
+
mime_type = "image/jpeg" # Default, could be improved with image format detection
|
|
565
|
+
encoded_data = base64.b64encode(image).decode('utf-8')
|
|
566
|
+
|
|
567
|
+
elif isinstance(image, Image.Image):
|
|
568
|
+
# Handle PIL Image object
|
|
569
|
+
buffer = io.BytesIO()
|
|
570
|
+
# Save as JPEG by default (could be made configurable)
|
|
571
|
+
image_format = "JPEG"
|
|
572
|
+
if image.mode in ("RGBA", "LA", "P"):
|
|
573
|
+
# Convert to RGB for JPEG compatibility
|
|
574
|
+
image = image.convert("RGB")
|
|
575
|
+
|
|
576
|
+
image.save(buffer, format=image_format)
|
|
577
|
+
encoded_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
|
|
578
|
+
mime_type = f"image/{image_format.lower()}"
|
|
579
|
+
|
|
580
|
+
else:
|
|
581
|
+
raise ValueError("Image must be a Path, bytes, or PIL.Image object.")
|
|
582
|
+
|
|
583
|
+
return {
|
|
584
|
+
"type": "image",
|
|
585
|
+
"source": {
|
|
586
|
+
"type": "base64",
|
|
587
|
+
"media_type": mime_type,
|
|
588
|
+
"data": encoded_data
|
|
589
|
+
}
|
|
590
|
+
}
|
|
591
|
+
|
|
592
|
+
async def ask_to_image(
    self,
    prompt: str,
    image: Union[Path, bytes, Image.Image],
    reference_images: Optional[List[Union[Path, bytes, Image.Image]]] = None,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    structured_output: Union[type, StructuredOutputConfig] = None,
    count_objects: bool = False,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    system_prompt: Optional[str] = None
) -> AIMessage:
    """
    Ask Claude a question about an image with optional conversation memory.

    Args:
        prompt (str): The question or prompt about the image.
        image (Union[Path, bytes, Image.Image]): The primary image to analyze.
        reference_images (Optional[List[Union[Path, bytes, Image.Image]]]):
            Optional reference images.
        model (Union[ClaudeModel, str]): The Claude model to use.
        max_tokens (int): Maximum tokens for the response.
        temperature (float): Sampling temperature.
        structured_output (Union[type, StructuredOutputConfig]):
            Optional structured output format.
        count_objects (bool):
            Whether to count objects in the image (enables default JSON output).
        user_id (Optional[str]): User identifier for conversation memory.
        session_id (Optional[str]): Session identifier for conversation memory.
        system_prompt (Optional[str]): Optional system prompt prepended to the
            structured-output instructions.

    Returns:
        AIMessage: The response from Claude about the image.

    Raises:
        RuntimeError: If the client has not been initialized.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # Generate unique turn ID for tracking.
    turn_id = str(uuid.uuid4())
    original_prompt = prompt

    # Get conversation history if available.
    conversation_history = None
    messages = []

    # Get conversation context (files are not used here; images are handled
    # separately below).
    if user_id and session_id and self.conversation_memory:
        chatbot_key = self._get_chatbot_key()
        # Get or create conversation history.
        conversation_history = await self.conversation_memory.get_history(
            user_id,
            session_id,
            chatbot_id=chatbot_key
        )
        if not conversation_history:
            conversation_history = await self.conversation_memory.create_history(
                user_id,
                session_id,
                chatbot_id=chatbot_key
            )

        # Convert stored turns to API message format for context.
        messages = conversation_history.get_messages_for_api()

    # BUG FIX: apply the count_objects default schema BEFORE building the
    # payload. Previously this ran after the system prompt was constructed,
    # so count_objects requests never received the JSON-only instruction.
    if count_objects and not structured_output:
        try:
            structured_output = ObjectDetectionResult
        except (ImportError, NameError):
            # BUG FIX: a missing name raises NameError, not ImportError, so
            # the previous `except ImportError` fallback was unreachable.
            class SimpleObjectDetection(BaseModel):
                """Simple object detection result structure."""
                analysis: str = Field(description="Detailed analysis of the image")
                total_count: int = Field(description="Total number of objects detected")
                objects: TypingList[str] = Field(
                    default_factory=list,
                    description="List of detected objects"
                )

            structured_output = SimpleObjectDetection
        output_config = StructuredOutputConfig(
            output_type=structured_output,
            format=OutputFormat.JSON
        )
    else:
        output_config = self._get_structured_config(
            structured_output
        )

    # Prepare the content for the current message: primary image first,
    # then any reference images, then the text prompt last.
    content = [self._encode_image_for_claude(image)]
    if reference_images:
        for ref_image in reference_images:
            content.append(self._encode_image_for_claude(ref_image))
    content.append({
        "type": "text",
        "text": prompt
    })

    # Create the new user message with image content.
    new_message = {
        "role": "user",
        "content": content
    }

    # Replace the last message (which was just text) with our multimodal message.
    if messages and messages[-1]["role"] == "user":
        messages[-1] = new_message
    else:
        messages.append(new_message)

    # Prepare the payload.
    payload = {
        "model": model.value if isinstance(model, ClaudeModel) else model,
        "max_tokens": max_tokens or self.max_tokens,
        "temperature": temperature or self.temperature,
        "messages": messages
    }

    # Add system prompt; structured output gets a strict JSON-only preamble.
    if structured_output:
        structured_system_prompt = "You are a precise assistant that responds only with valid JSON when requested. When asked for structured output, respond with ONLY the JSON object, no additional text, explanations, or markdown formatting."
        if system_prompt:
            payload["system"] = f"{system_prompt}\n\n{structured_system_prompt}"
        else:
            payload["system"] = structured_system_prompt
    elif system_prompt:
        payload["system"] = system_prompt

    # Note: Claude's vision models typically don't support tool calling,
    # so tool preparation is skipped; this stays empty for vision requests.
    all_tool_calls = []

    # Make the API request using the SDK.
    response = await self.client.messages.create(**payload)
    result = response.model_dump()

    # Handle structured output: gather all text blocks from the response.
    final_output = None
    text_content = ""
    for content_block in result.get("content", []):
        if content_block.get("type") == "text":
            text_content += content_block.get("text", "")

    if structured_output:
        try:
            final_output = await self._parse_structured_output(
                text_content,
                output_config
            )
        except Exception:
            # Parsing failed: fall back to the raw text.
            final_output = text_content
    else:
        final_output = text_content

    # Record the assistant turn exactly once.
    # BUG FIX: the assistant message was previously appended to `messages`
    # AND appended again via `messages + [...]` when updating memory,
    # duplicating it in the stored history.
    messages.append({"role": "assistant", "content": result["content"]})

    # Update conversation memory.
    tools_used = [tc.name for tc in all_tool_calls]
    await self._update_conversation_memory(
        user_id,
        session_id,
        conversation_history,
        messages,
        system_prompt,
        turn_id,
        f"[Image Analysis]: {original_prompt}",  # Include image context in the stored prompt
        text_content,
        tools_used
    )

    # Create AIMessage using factory.
    ai_message = AIMessageFactory.from_claude(
        response=result,
        input_text=f"[Image Analysis]: {original_prompt}",
        model=model.value if isinstance(model, ClaudeModel) else model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=final_output,
        tool_calls=all_tool_calls
    )

    # Ensure the text field is properly set for property access.
    if not structured_output:
        ai_message.response = final_output

    return ai_message
|
|
798
|
+
|
|
799
|
+
async def summarize_text(
    self,
    text: str,
    max_length: int = 500,
    min_length: int = 100,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    temperature: Optional[float] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> AIMessage:
    """
    Generates a summary for a given text in a stateless manner.

    Args:
        text (str): The text content to summarize.
        max_length (int): The maximum desired character length for the summary.
        min_length (int): The minimum desired character length for the summary.
        model (Union[ClaudeModel, str]): The model to use.
        temperature (float): Sampling temperature for response generation.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    self.logger.info(
        f"Generating summary for text: '{text[:50]}...'"
    )

    # Unique turn identifier for tracking this stateless exchange.
    turn_id = str(uuid.uuid4())

    # Instruct the model exactly how the summary should be produced.
    summarization_prompt = f"""Your job is to produce a final summary from the following text and identify the main theme.
- The summary should be concise and to the point.
- The summary should be no longer than {max_length} characters and no less than {min_length} characters.
- The summary should be in a single paragraph.
- Focus on the key information and main points.
- Write in clear, accessible language."""

    # Single-turn user message carrying the full text to summarize.
    request_messages = [{
        "role": "user",
        "content": [{"type": "text", "text": text}]
    }]

    request = {
        "model": model.value if isinstance(model, Enum) else model,
        "max_tokens": self.max_tokens,
        "temperature": temperature or self.temperature,
        "messages": request_messages,
        "system": summarization_prompt
    }

    # Stateless call to Claude via the SDK; no memory is consulted.
    response = await self.client.messages.create(**request)
    result = response.model_dump()

    # Wrap the raw response into the unified AIMessage shape.
    return AIMessageFactory.from_claude(
        response=result,
        input_text=text,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=None,
        tool_calls=[]
    )
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
async def translate_text(
    self,
    text: str,
    target_lang: str,
    source_lang: Optional[str] = None,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    temperature: Optional[float] = 0.2,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> AIMessage:
    """
    Translates a given text from a source language to a target language.

    Args:
        text (str): The text content to translate.
        target_lang (str): The target language name or ISO code (e.g., 'Spanish', 'es', 'French', 'fr').
        source_lang (Optional[str]): The source language name or ISO code.
            If None, Claude will attempt to detect it.
        model (Union[ClaudeModel, str]): The model to use. Defaults to SONNET_4.
        temperature (float): Sampling temperature for response generation.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    self.logger.info(
        f"Translating text to '{target_lang}': '{text[:50]}...'"
    )

    # Unique turn identifier for tracking this stateless exchange.
    turn_id = str(uuid.uuid4())

    # The two prompt variants differ only in whether the source language is
    # known up front or must be detected by the model first.
    if source_lang:
        translation_prompt = f"""You are a professional translator. Translate the following text from {source_lang} to {target_lang}.
Requirements:
- Provide only the translated text, without any additional comments or explanations
- Maintain the original meaning and tone
- Use natural, fluent language in the target language
- Preserve formatting if present (like line breaks, bullet points, etc.)
- If there are proper nouns or technical terms, keep them appropriate for the target language context"""  # noqa
    else:
        translation_prompt = f"""You are a professional translator. First, detect the source language of the following text, then translate it to {target_lang}.
Requirements:
- Provide only the translated text, without any additional comments or explanations
- Maintain the original meaning and tone
- Use natural, fluent language in the target language
- Preserve formatting if present (like line breaks, bullet points, etc.)
- If there are proper nouns or technical terms, keep them appropriate for the target language context"""  # noqa

    # Single-turn user message carrying the text to translate.
    request_messages = [{
        "role": "user",
        "content": [{"type": "text", "text": text}]
    }]

    request = {
        "model": model.value if isinstance(model, Enum) else model,
        "max_tokens": self.max_tokens,
        "temperature": temperature,
        "messages": request_messages,
        "system": translation_prompt
    }

    # Stateless call to Claude via the SDK; no memory is consulted.
    response = await self.client.messages.create(**request)
    result = response.model_dump()

    # Wrap the raw response into the unified AIMessage shape.
    return AIMessageFactory.from_claude(
        response=result,
        input_text=text,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=None,
        tool_calls=[]
    )
|
|
954
|
+
|
|
955
|
+
|
|
956
|
+
# Additional helper methods you might want to add
|
|
957
|
+
|
|
958
|
+
async def extract_key_points(
    self,
    text: str,
    num_points: int = 5,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    temperature: Optional[float] = 0.3,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> AIMessage:
    """
    Extract key points from a given text.

    Args:
        text (str): The text content to analyze.
        num_points (int): The number of key points to extract.
        model (Union[ClaudeModel, str]): The model to use.
        temperature (float): Sampling temperature for response generation.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # Unique turn identifier for tracking this stateless exchange.
    turn_id = str(uuid.uuid4())

    # Instruct the model on the number, ordering and formatting of points.
    extraction_prompt = f"""Extract the {num_points} most important key points from the following text.
Requirements:
- Present each point as a clear, concise bullet point
- Focus on the main ideas and significant information
- Each point should be self-contained and meaningful
- Order points by importance (most important first)
- Use bullet points (•) to format the list"""

    # Single-turn user message carrying the full text to analyze.
    request_messages = [{
        "role": "user",
        "content": [{"type": "text", "text": text}]
    }]

    request = {
        "model": model.value if isinstance(model, Enum) else model,
        "max_tokens": self.max_tokens,
        "temperature": temperature,
        "messages": request_messages,
        "system": extraction_prompt
    }

    # Stateless call to Claude via the SDK; no memory is consulted.
    response = await self.client.messages.create(**request)
    result = response.model_dump()

    # Wrap the raw response into the unified AIMessage shape.
    return AIMessageFactory.from_claude(
        response=result,
        input_text=text,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=None,
        tool_calls=[]
    )
|
|
1017
|
+
|
|
1018
|
+
|
|
1019
|
+
async def analyze_sentiment(
    self,
    text: str,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    temperature: Optional[float] = 0.1,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    use_structured: bool = False,
) -> AIMessage:
    """
    Analyze the sentiment of a given text.

    Args:
        text (str): The text content to analyze.
        model (Union[ClaudeModel, str]): The model to use.
        temperature (float): Sampling temperature for response generation.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.
        use_structured (bool): If True, request a strict-JSON response and
            attach the SentimentAnalysis schema to the returned message.

    Returns:
        AIMessage: The sentiment analysis response.

    Raises:
        RuntimeError: If the client has not been initialized.
    """
    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # Unique turn identifier for tracking this stateless exchange.
    turn_id = str(uuid.uuid4())
    if use_structured:
        # Strict-JSON variant: the model must emit only the schema below.
        system_prompt = """You are a sentiment analysis expert.
Analyze the sentiment of the given text and respond with valid JSON matching this exact schema:
{
"sentiment": "positive" | "negative" | "neutral" | "mixed",
"confidence_level": 0.0-1.0,
"emotional_indicators": ["word1", "phrase2", ...],
"reason": "explanation of analysis"
}
Respond only with valid JSON, no additional text."""
    else:
        # Free-form variant: clearly sectioned prose response.
        system_prompt = """
Analyze the sentiment of the following text and provide a structured response.
Your response should include:
1. Overall sentiment (Positive, Negative, Neutral, or Mixed)
2. Confidence level (High, Medium, Low)
3. Key emotional indicators found in the text
4. Brief explanation of your analysis
Format your response clearly with these sections.
"""

    messages = [{
        "role": "user",
        "content": [{"type": "text", "text": text}]
    }]

    payload = {
        "model": model.value if isinstance(model, Enum) else model,
        "max_tokens": self.max_tokens,
        "temperature": temperature,
        "messages": messages,
        "system": system_prompt
    }

    response = await self.client.messages.create(**payload)
    # BUG FIX: `result` was never assigned (the model_dump() call was
    # missing), so the factory call below raised NameError at runtime.
    result = response.model_dump()

    structured_output = SentimentAnalysis if use_structured else None
    return AIMessageFactory.from_claude(
        response=result,
        input_text=f"Review: {text[:100]}...",
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=structured_output,
        tool_calls=[]
    )
|
|
1088
|
+
|
|
1089
|
+
def _get_deep_research_system_prompt(self) -> str:
|
|
1090
|
+
"""Generate a specialized system prompt for deep research mode.
|
|
1091
|
+
|
|
1092
|
+
This prompt encourages thorough, methodical research with iterative refinement.
|
|
1093
|
+
"""
|
|
1094
|
+
return """You are in DEEP RESEARCH mode. Your task is to conduct thorough, comprehensive research on the given topic.
|
|
1095
|
+
|
|
1096
|
+
Follow this methodology:
|
|
1097
|
+
1. **Initial Analysis**: Break down the research question into key components
|
|
1098
|
+
2. **Systematic Investigation**: Use available tools to gather information from multiple sources
|
|
1099
|
+
3. **Critical Evaluation**: Assess the credibility and relevance of each source
|
|
1100
|
+
4. **Synthesis**: Combine findings into a coherent, well-structured response
|
|
1101
|
+
5. **Verification**: Cross-reference facts and verify claims when possible
|
|
1102
|
+
|
|
1103
|
+
Research Guidelines:
|
|
1104
|
+
- Be comprehensive: explore multiple angles and perspectives
|
|
1105
|
+
- Be critical: evaluate source quality and potential biases
|
|
1106
|
+
- Be thorough: don't stop at surface-level information
|
|
1107
|
+
- Be structured: organize findings logically
|
|
1108
|
+
- Be accurate: cite sources and acknowledge uncertainty when appropriate
|
|
1109
|
+
|
|
1110
|
+
If tools are available, use them strategically to:
|
|
1111
|
+
- Search for current information
|
|
1112
|
+
- Verify facts across multiple sources
|
|
1113
|
+
- Gather diverse perspectives
|
|
1114
|
+
- Access specialized knowledge bases
|
|
1115
|
+
|
|
1116
|
+
Provide your final answer with:
|
|
1117
|
+
- Clear, well-organized structure
|
|
1118
|
+
- Supporting evidence for key claims
|
|
1119
|
+
- Acknowledgment of limitations or gaps in available information
|
|
1120
|
+
- Relevant citations or references when applicable"""
|
|
1121
|
+
|
|
1122
|
+
async def analyze_product_review(
    self,
    review_text: str,
    product_id: str,
    product_name: str,
    model: Union[ClaudeModel, str] = ClaudeModel.SONNET_4,
    temperature: Optional[float] = 0.1,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> AIMessage:
    """
    Analyze a product review and extract structured information.

    Args:
        review_text (str): The product review text to analyze.
        product_id (str): Unique identifier for the product.
        product_name (str): Name of the product being reviewed.
        model (Union[ClaudeModel, str]): The model to use.
        temperature (Optional[float]): Sampling temperature for response generation.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.

    Returns:
        AIMessage: Message built by ``AIMessageFactory.from_claude`` from the
        raw model response, with ``ProductReview`` requested as the
        structured-output schema.

    Raises:
        RuntimeError: If the client has not been initialized (the instance
        must be used as an async context manager first).
    """
    import json  # local import: used only to JSON-escape prompt values

    if not self.client:
        raise RuntimeError("Client not initialized. Use async context manager.")

    # One id per call so the resulting AIMessage can be traced to this turn.
    turn_id = str(uuid.uuid4())

    # JSON-encode the caller-supplied values before interpolating them into
    # the schema example: a product id/name containing quotes, braces, or
    # backslashes would otherwise corrupt the JSON the model is asked to
    # reproduce (and lets review metadata inject prompt text).
    safe_product_id = json.dumps(product_id)
    safe_product_name = json.dumps(product_name)

    system_prompt = f"""You are a product review analysis expert. Analyze the given product review and respond with valid JSON matching this exact schema:

{{
    "product_id": {safe_product_id},
    "product_name": {safe_product_name},
    "review_text": "original review text",
    "rating": 0.0-5.0,
    "sentiment": "positive" | "negative" | "neutral",
    "key_features": ["feature1", "feature2", ...]
}}

Extract the rating based on the review content (estimate if not explicitly stated), determine sentiment, and identify key product features mentioned. Respond only with valid JSON, no additional text."""

    messages = [{
        "role": "user",
        "content": [{
            "type": "text",
            "text": f"Product ID: {product_id}\nProduct Name: {product_name}\nReview: {review_text}"
        }]
    }]

    payload = {
        # Enum members carry the API model string in .value; plain strings pass through.
        "model": model.value if isinstance(model, Enum) else model,
        "max_tokens": self.max_tokens,
        "temperature": temperature,
        "messages": messages,
        "system": system_prompt,
    }

    response = await self.client.messages.create(**payload)
    # Serialize the SDK response object to a plain dict for the factory.
    result = response.model_dump()

    return AIMessageFactory.from_claude(
        response=result,
        input_text=review_text,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=ProductReview,
        tool_calls=[],
    )
|
|
1188
|
+
|
|
1189
|
+
|
|
1190
|
+
# Backward-compatibility alias: keep the old ``ClaudeClient`` name importable
# so existing code that references it continues to work unchanged.
ClaudeClient = AnthropicClient
|