ai-parrot 0.17.2__cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentui/.prettierrc +15 -0
- agentui/QUICKSTART.md +272 -0
- agentui/README.md +59 -0
- agentui/env.example +16 -0
- agentui/jsconfig.json +14 -0
- agentui/package-lock.json +4242 -0
- agentui/package.json +34 -0
- agentui/scripts/postinstall/apply-patches.mjs +260 -0
- agentui/src/app.css +61 -0
- agentui/src/app.d.ts +13 -0
- agentui/src/app.html +12 -0
- agentui/src/components/LoadingSpinner.svelte +64 -0
- agentui/src/components/ThemeSwitcher.svelte +159 -0
- agentui/src/components/index.js +4 -0
- agentui/src/lib/api/bots.ts +60 -0
- agentui/src/lib/api/chat.ts +22 -0
- agentui/src/lib/api/http.ts +25 -0
- agentui/src/lib/components/BotCard.svelte +33 -0
- agentui/src/lib/components/ChatBubble.svelte +63 -0
- agentui/src/lib/components/Toast.svelte +21 -0
- agentui/src/lib/config.ts +20 -0
- agentui/src/lib/stores/auth.svelte.ts +73 -0
- agentui/src/lib/stores/theme.svelte.js +64 -0
- agentui/src/lib/stores/toast.svelte.ts +31 -0
- agentui/src/lib/utils/conversation.ts +39 -0
- agentui/src/routes/+layout.svelte +20 -0
- agentui/src/routes/+page.svelte +232 -0
- agentui/src/routes/login/+page.svelte +200 -0
- agentui/src/routes/talk/[agentId]/+page.svelte +297 -0
- agentui/src/routes/talk/[agentId]/+page.ts +7 -0
- agentui/static/README.md +1 -0
- agentui/svelte.config.js +11 -0
- agentui/tailwind.config.ts +53 -0
- agentui/tsconfig.json +3 -0
- agentui/vite.config.ts +10 -0
- ai_parrot-0.17.2.dist-info/METADATA +472 -0
- ai_parrot-0.17.2.dist-info/RECORD +535 -0
- ai_parrot-0.17.2.dist-info/WHEEL +6 -0
- ai_parrot-0.17.2.dist-info/entry_points.txt +2 -0
- ai_parrot-0.17.2.dist-info/licenses/LICENSE +21 -0
- ai_parrot-0.17.2.dist-info/top_level.txt +6 -0
- crew-builder/.prettierrc +15 -0
- crew-builder/QUICKSTART.md +259 -0
- crew-builder/README.md +113 -0
- crew-builder/env.example +17 -0
- crew-builder/jsconfig.json +14 -0
- crew-builder/package-lock.json +4182 -0
- crew-builder/package.json +37 -0
- crew-builder/scripts/postinstall/apply-patches.mjs +260 -0
- crew-builder/src/app.css +62 -0
- crew-builder/src/app.d.ts +13 -0
- crew-builder/src/app.html +12 -0
- crew-builder/src/components/LoadingSpinner.svelte +64 -0
- crew-builder/src/components/ThemeSwitcher.svelte +149 -0
- crew-builder/src/components/index.js +9 -0
- crew-builder/src/lib/api/bots.ts +60 -0
- crew-builder/src/lib/api/chat.ts +80 -0
- crew-builder/src/lib/api/client.ts +56 -0
- crew-builder/src/lib/api/crew/crew.ts +136 -0
- crew-builder/src/lib/api/index.ts +5 -0
- crew-builder/src/lib/api/o365/auth.ts +65 -0
- crew-builder/src/lib/auth/auth.ts +54 -0
- crew-builder/src/lib/components/AgentNode.svelte +43 -0
- crew-builder/src/lib/components/BotCard.svelte +33 -0
- crew-builder/src/lib/components/ChatBubble.svelte +67 -0
- crew-builder/src/lib/components/ConfigPanel.svelte +278 -0
- crew-builder/src/lib/components/JsonTreeNode.svelte +76 -0
- crew-builder/src/lib/components/JsonViewer.svelte +24 -0
- crew-builder/src/lib/components/MarkdownEditor.svelte +48 -0
- crew-builder/src/lib/components/ThemeToggle.svelte +36 -0
- crew-builder/src/lib/components/Toast.svelte +67 -0
- crew-builder/src/lib/components/Toolbar.svelte +157 -0
- crew-builder/src/lib/components/index.ts +10 -0
- crew-builder/src/lib/config.ts +8 -0
- crew-builder/src/lib/stores/auth.svelte.ts +228 -0
- crew-builder/src/lib/stores/crewStore.ts +369 -0
- crew-builder/src/lib/stores/theme.svelte.js +145 -0
- crew-builder/src/lib/stores/toast.svelte.ts +69 -0
- crew-builder/src/lib/utils/conversation.ts +39 -0
- crew-builder/src/lib/utils/markdown.ts +122 -0
- crew-builder/src/lib/utils/talkHistory.ts +47 -0
- crew-builder/src/routes/+layout.svelte +20 -0
- crew-builder/src/routes/+page.svelte +539 -0
- crew-builder/src/routes/agents/+page.svelte +247 -0
- crew-builder/src/routes/agents/[agentId]/+page.svelte +288 -0
- crew-builder/src/routes/agents/[agentId]/+page.ts +7 -0
- crew-builder/src/routes/builder/+page.svelte +204 -0
- crew-builder/src/routes/crew/ask/+page.svelte +1052 -0
- crew-builder/src/routes/crew/ask/+page.ts +1 -0
- crew-builder/src/routes/integrations/o365/+page.svelte +304 -0
- crew-builder/src/routes/login/+page.svelte +197 -0
- crew-builder/src/routes/talk/[agentId]/+page.svelte +487 -0
- crew-builder/src/routes/talk/[agentId]/+page.ts +7 -0
- crew-builder/static/README.md +1 -0
- crew-builder/svelte.config.js +11 -0
- crew-builder/tailwind.config.ts +53 -0
- crew-builder/tsconfig.json +3 -0
- crew-builder/vite.config.ts +10 -0
- mcp_servers/calculator_server.py +309 -0
- parrot/__init__.py +27 -0
- parrot/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/__pycache__/version.cpython-310.pyc +0 -0
- parrot/_version.py +34 -0
- parrot/a2a/__init__.py +48 -0
- parrot/a2a/client.py +658 -0
- parrot/a2a/discovery.py +89 -0
- parrot/a2a/mixin.py +257 -0
- parrot/a2a/models.py +376 -0
- parrot/a2a/server.py +770 -0
- parrot/agents/__init__.py +29 -0
- parrot/bots/__init__.py +12 -0
- parrot/bots/a2a_agent.py +19 -0
- parrot/bots/abstract.py +3139 -0
- parrot/bots/agent.py +1129 -0
- parrot/bots/basic.py +9 -0
- parrot/bots/chatbot.py +669 -0
- parrot/bots/data.py +1618 -0
- parrot/bots/database/__init__.py +5 -0
- parrot/bots/database/abstract.py +3071 -0
- parrot/bots/database/cache.py +286 -0
- parrot/bots/database/models.py +468 -0
- parrot/bots/database/prompts.py +154 -0
- parrot/bots/database/retries.py +98 -0
- parrot/bots/database/router.py +269 -0
- parrot/bots/database/sql.py +41 -0
- parrot/bots/db/__init__.py +6 -0
- parrot/bots/db/abstract.py +556 -0
- parrot/bots/db/bigquery.py +602 -0
- parrot/bots/db/cache.py +85 -0
- parrot/bots/db/documentdb.py +668 -0
- parrot/bots/db/elastic.py +1014 -0
- parrot/bots/db/influx.py +898 -0
- parrot/bots/db/mock.py +96 -0
- parrot/bots/db/multi.py +783 -0
- parrot/bots/db/prompts.py +185 -0
- parrot/bots/db/sql.py +1255 -0
- parrot/bots/db/tools.py +212 -0
- parrot/bots/document.py +680 -0
- parrot/bots/hrbot.py +15 -0
- parrot/bots/kb.py +170 -0
- parrot/bots/mcp.py +36 -0
- parrot/bots/orchestration/README.md +463 -0
- parrot/bots/orchestration/__init__.py +1 -0
- parrot/bots/orchestration/agent.py +155 -0
- parrot/bots/orchestration/crew.py +3330 -0
- parrot/bots/orchestration/fsm.py +1179 -0
- parrot/bots/orchestration/hr.py +434 -0
- parrot/bots/orchestration/storage/__init__.py +4 -0
- parrot/bots/orchestration/storage/memory.py +100 -0
- parrot/bots/orchestration/storage/mixin.py +119 -0
- parrot/bots/orchestration/verify.py +202 -0
- parrot/bots/product.py +204 -0
- parrot/bots/prompts/__init__.py +96 -0
- parrot/bots/prompts/agents.py +155 -0
- parrot/bots/prompts/data.py +216 -0
- parrot/bots/prompts/output_generation.py +8 -0
- parrot/bots/scraper/__init__.py +3 -0
- parrot/bots/scraper/models.py +122 -0
- parrot/bots/scraper/scraper.py +1173 -0
- parrot/bots/scraper/templates.py +115 -0
- parrot/bots/stores/__init__.py +5 -0
- parrot/bots/stores/local.py +172 -0
- parrot/bots/webdev.py +81 -0
- parrot/cli.py +17 -0
- parrot/clients/__init__.py +16 -0
- parrot/clients/base.py +1491 -0
- parrot/clients/claude.py +1191 -0
- parrot/clients/factory.py +129 -0
- parrot/clients/google.py +4567 -0
- parrot/clients/gpt.py +1975 -0
- parrot/clients/grok.py +432 -0
- parrot/clients/groq.py +986 -0
- parrot/clients/hf.py +582 -0
- parrot/clients/models.py +18 -0
- parrot/conf.py +395 -0
- parrot/embeddings/__init__.py +9 -0
- parrot/embeddings/base.py +157 -0
- parrot/embeddings/google.py +98 -0
- parrot/embeddings/huggingface.py +74 -0
- parrot/embeddings/openai.py +84 -0
- parrot/embeddings/processor.py +88 -0
- parrot/exceptions.c +13868 -0
- parrot/exceptions.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/exceptions.pxd +22 -0
- parrot/exceptions.pxi +15 -0
- parrot/exceptions.pyx +44 -0
- parrot/generators/__init__.py +29 -0
- parrot/generators/base.py +200 -0
- parrot/generators/html.py +293 -0
- parrot/generators/react.py +205 -0
- parrot/generators/streamlit.py +203 -0
- parrot/generators/template.py +105 -0
- parrot/handlers/__init__.py +4 -0
- parrot/handlers/agent.py +861 -0
- parrot/handlers/agents/__init__.py +1 -0
- parrot/handlers/agents/abstract.py +900 -0
- parrot/handlers/bots.py +338 -0
- parrot/handlers/chat.py +915 -0
- parrot/handlers/creation.sql +192 -0
- parrot/handlers/crew/ARCHITECTURE.md +362 -0
- parrot/handlers/crew/README_BOTMANAGER_PERSISTENCE.md +303 -0
- parrot/handlers/crew/README_REDIS_PERSISTENCE.md +366 -0
- parrot/handlers/crew/__init__.py +0 -0
- parrot/handlers/crew/handler.py +801 -0
- parrot/handlers/crew/models.py +229 -0
- parrot/handlers/crew/redis_persistence.py +523 -0
- parrot/handlers/jobs/__init__.py +10 -0
- parrot/handlers/jobs/job.py +384 -0
- parrot/handlers/jobs/mixin.py +627 -0
- parrot/handlers/jobs/models.py +115 -0
- parrot/handlers/jobs/worker.py +31 -0
- parrot/handlers/models.py +596 -0
- parrot/handlers/o365_auth.py +105 -0
- parrot/handlers/stream.py +337 -0
- parrot/interfaces/__init__.py +6 -0
- parrot/interfaces/aws.py +143 -0
- parrot/interfaces/credentials.py +113 -0
- parrot/interfaces/database.py +27 -0
- parrot/interfaces/google.py +1123 -0
- parrot/interfaces/hierarchy.py +1227 -0
- parrot/interfaces/http.py +651 -0
- parrot/interfaces/images/__init__.py +0 -0
- parrot/interfaces/images/plugins/__init__.py +24 -0
- parrot/interfaces/images/plugins/abstract.py +58 -0
- parrot/interfaces/images/plugins/analisys.py +148 -0
- parrot/interfaces/images/plugins/classify.py +150 -0
- parrot/interfaces/images/plugins/classifybase.py +182 -0
- parrot/interfaces/images/plugins/detect.py +150 -0
- parrot/interfaces/images/plugins/exif.py +1103 -0
- parrot/interfaces/images/plugins/hash.py +52 -0
- parrot/interfaces/images/plugins/vision.py +104 -0
- parrot/interfaces/images/plugins/yolo.py +66 -0
- parrot/interfaces/images/plugins/zerodetect.py +197 -0
- parrot/interfaces/o365.py +978 -0
- parrot/interfaces/onedrive.py +822 -0
- parrot/interfaces/sharepoint.py +1435 -0
- parrot/interfaces/soap.py +257 -0
- parrot/loaders/__init__.py +8 -0
- parrot/loaders/abstract.py +1131 -0
- parrot/loaders/audio.py +199 -0
- parrot/loaders/basepdf.py +53 -0
- parrot/loaders/basevideo.py +1568 -0
- parrot/loaders/csv.py +409 -0
- parrot/loaders/docx.py +116 -0
- parrot/loaders/epubloader.py +316 -0
- parrot/loaders/excel.py +199 -0
- parrot/loaders/factory.py +55 -0
- parrot/loaders/files/__init__.py +0 -0
- parrot/loaders/files/abstract.py +39 -0
- parrot/loaders/files/html.py +26 -0
- parrot/loaders/files/text.py +63 -0
- parrot/loaders/html.py +152 -0
- parrot/loaders/markdown.py +442 -0
- parrot/loaders/pdf.py +373 -0
- parrot/loaders/pdfmark.py +320 -0
- parrot/loaders/pdftables.py +506 -0
- parrot/loaders/ppt.py +476 -0
- parrot/loaders/qa.py +63 -0
- parrot/loaders/splitters/__init__.py +10 -0
- parrot/loaders/splitters/base.py +138 -0
- parrot/loaders/splitters/md.py +228 -0
- parrot/loaders/splitters/token.py +143 -0
- parrot/loaders/txt.py +26 -0
- parrot/loaders/video.py +89 -0
- parrot/loaders/videolocal.py +218 -0
- parrot/loaders/videounderstanding.py +377 -0
- parrot/loaders/vimeo.py +167 -0
- parrot/loaders/web.py +599 -0
- parrot/loaders/youtube.py +504 -0
- parrot/manager/__init__.py +5 -0
- parrot/manager/manager.py +1030 -0
- parrot/mcp/__init__.py +28 -0
- parrot/mcp/adapter.py +105 -0
- parrot/mcp/cli.py +174 -0
- parrot/mcp/client.py +119 -0
- parrot/mcp/config.py +75 -0
- parrot/mcp/integration.py +842 -0
- parrot/mcp/oauth.py +933 -0
- parrot/mcp/server.py +225 -0
- parrot/mcp/transports/__init__.py +3 -0
- parrot/mcp/transports/base.py +279 -0
- parrot/mcp/transports/grpc_session.py +163 -0
- parrot/mcp/transports/http.py +312 -0
- parrot/mcp/transports/mcp.proto +108 -0
- parrot/mcp/transports/quic.py +1082 -0
- parrot/mcp/transports/sse.py +330 -0
- parrot/mcp/transports/stdio.py +309 -0
- parrot/mcp/transports/unix.py +395 -0
- parrot/mcp/transports/websocket.py +547 -0
- parrot/memory/__init__.py +16 -0
- parrot/memory/abstract.py +209 -0
- parrot/memory/agent.py +32 -0
- parrot/memory/cache.py +175 -0
- parrot/memory/core.py +555 -0
- parrot/memory/file.py +153 -0
- parrot/memory/mem.py +131 -0
- parrot/memory/redis.py +613 -0
- parrot/models/__init__.py +46 -0
- parrot/models/basic.py +118 -0
- parrot/models/compliance.py +208 -0
- parrot/models/crew.py +395 -0
- parrot/models/detections.py +654 -0
- parrot/models/generation.py +85 -0
- parrot/models/google.py +223 -0
- parrot/models/groq.py +23 -0
- parrot/models/openai.py +30 -0
- parrot/models/outputs.py +285 -0
- parrot/models/responses.py +938 -0
- parrot/notifications/__init__.py +743 -0
- parrot/openapi/__init__.py +3 -0
- parrot/openapi/components.yaml +641 -0
- parrot/openapi/config.py +322 -0
- parrot/outputs/__init__.py +32 -0
- parrot/outputs/formats/__init__.py +108 -0
- parrot/outputs/formats/altair.py +359 -0
- parrot/outputs/formats/application.py +122 -0
- parrot/outputs/formats/base.py +351 -0
- parrot/outputs/formats/bokeh.py +356 -0
- parrot/outputs/formats/card.py +424 -0
- parrot/outputs/formats/chart.py +436 -0
- parrot/outputs/formats/d3.py +255 -0
- parrot/outputs/formats/echarts.py +310 -0
- parrot/outputs/formats/generators/__init__.py +0 -0
- parrot/outputs/formats/generators/abstract.py +61 -0
- parrot/outputs/formats/generators/panel.py +145 -0
- parrot/outputs/formats/generators/streamlit.py +86 -0
- parrot/outputs/formats/generators/terminal.py +63 -0
- parrot/outputs/formats/holoviews.py +310 -0
- parrot/outputs/formats/html.py +147 -0
- parrot/outputs/formats/jinja2.py +46 -0
- parrot/outputs/formats/json.py +87 -0
- parrot/outputs/formats/map.py +933 -0
- parrot/outputs/formats/markdown.py +172 -0
- parrot/outputs/formats/matplotlib.py +237 -0
- parrot/outputs/formats/mixins/__init__.py +0 -0
- parrot/outputs/formats/mixins/emaps.py +855 -0
- parrot/outputs/formats/plotly.py +341 -0
- parrot/outputs/formats/seaborn.py +310 -0
- parrot/outputs/formats/table.py +397 -0
- parrot/outputs/formats/template_report.py +138 -0
- parrot/outputs/formats/yaml.py +125 -0
- parrot/outputs/formatter.py +152 -0
- parrot/outputs/templates/__init__.py +95 -0
- parrot/pipelines/__init__.py +0 -0
- parrot/pipelines/abstract.py +210 -0
- parrot/pipelines/detector.py +124 -0
- parrot/pipelines/models.py +90 -0
- parrot/pipelines/planogram.py +3002 -0
- parrot/pipelines/table.sql +97 -0
- parrot/plugins/__init__.py +106 -0
- parrot/plugins/importer.py +80 -0
- parrot/py.typed +0 -0
- parrot/registry/__init__.py +18 -0
- parrot/registry/registry.py +594 -0
- parrot/scheduler/__init__.py +1189 -0
- parrot/scheduler/models.py +60 -0
- parrot/security/__init__.py +16 -0
- parrot/security/prompt_injection.py +268 -0
- parrot/security/security_events.sql +25 -0
- parrot/services/__init__.py +1 -0
- parrot/services/mcp/__init__.py +8 -0
- parrot/services/mcp/config.py +13 -0
- parrot/services/mcp/server.py +295 -0
- parrot/services/o365_remote_auth.py +235 -0
- parrot/stores/__init__.py +7 -0
- parrot/stores/abstract.py +352 -0
- parrot/stores/arango.py +1090 -0
- parrot/stores/bigquery.py +1377 -0
- parrot/stores/cache.py +106 -0
- parrot/stores/empty.py +10 -0
- parrot/stores/faiss_store.py +1157 -0
- parrot/stores/kb/__init__.py +9 -0
- parrot/stores/kb/abstract.py +68 -0
- parrot/stores/kb/cache.py +165 -0
- parrot/stores/kb/doc.py +325 -0
- parrot/stores/kb/hierarchy.py +346 -0
- parrot/stores/kb/local.py +457 -0
- parrot/stores/kb/prompt.py +28 -0
- parrot/stores/kb/redis.py +659 -0
- parrot/stores/kb/store.py +115 -0
- parrot/stores/kb/user.py +374 -0
- parrot/stores/models.py +59 -0
- parrot/stores/pgvector.py +3 -0
- parrot/stores/postgres.py +2853 -0
- parrot/stores/utils/__init__.py +0 -0
- parrot/stores/utils/chunking.py +197 -0
- parrot/telemetry/__init__.py +3 -0
- parrot/telemetry/mixin.py +111 -0
- parrot/template/__init__.py +3 -0
- parrot/template/engine.py +259 -0
- parrot/tools/__init__.py +23 -0
- parrot/tools/abstract.py +644 -0
- parrot/tools/agent.py +363 -0
- parrot/tools/arangodbsearch.py +537 -0
- parrot/tools/arxiv_tool.py +188 -0
- parrot/tools/calculator/__init__.py +3 -0
- parrot/tools/calculator/operations/__init__.py +38 -0
- parrot/tools/calculator/operations/calculus.py +80 -0
- parrot/tools/calculator/operations/statistics.py +76 -0
- parrot/tools/calculator/tool.py +150 -0
- parrot/tools/cloudwatch.py +988 -0
- parrot/tools/codeinterpreter/__init__.py +127 -0
- parrot/tools/codeinterpreter/executor.py +371 -0
- parrot/tools/codeinterpreter/internals.py +473 -0
- parrot/tools/codeinterpreter/models.py +643 -0
- parrot/tools/codeinterpreter/prompts.py +224 -0
- parrot/tools/codeinterpreter/tool.py +664 -0
- parrot/tools/company_info/__init__.py +6 -0
- parrot/tools/company_info/tool.py +1138 -0
- parrot/tools/correlationanalysis.py +437 -0
- parrot/tools/database/abstract.py +286 -0
- parrot/tools/database/bq.py +115 -0
- parrot/tools/database/cache.py +284 -0
- parrot/tools/database/models.py +95 -0
- parrot/tools/database/pg.py +343 -0
- parrot/tools/databasequery.py +1159 -0
- parrot/tools/db.py +1800 -0
- parrot/tools/ddgo.py +370 -0
- parrot/tools/decorators.py +271 -0
- parrot/tools/dftohtml.py +282 -0
- parrot/tools/document.py +549 -0
- parrot/tools/ecs.py +819 -0
- parrot/tools/edareport.py +368 -0
- parrot/tools/elasticsearch.py +1049 -0
- parrot/tools/employees.py +462 -0
- parrot/tools/epson/__init__.py +96 -0
- parrot/tools/excel.py +683 -0
- parrot/tools/file/__init__.py +13 -0
- parrot/tools/file/abstract.py +76 -0
- parrot/tools/file/gcs.py +378 -0
- parrot/tools/file/local.py +284 -0
- parrot/tools/file/s3.py +511 -0
- parrot/tools/file/tmp.py +309 -0
- parrot/tools/file/tool.py +501 -0
- parrot/tools/file_reader.py +129 -0
- parrot/tools/flowtask/__init__.py +19 -0
- parrot/tools/flowtask/tool.py +761 -0
- parrot/tools/gittoolkit.py +508 -0
- parrot/tools/google/__init__.py +18 -0
- parrot/tools/google/base.py +169 -0
- parrot/tools/google/tools.py +1251 -0
- parrot/tools/googlelocation.py +5 -0
- parrot/tools/googleroutes.py +5 -0
- parrot/tools/googlesearch.py +5 -0
- parrot/tools/googlesitesearch.py +5 -0
- parrot/tools/googlevoice.py +2 -0
- parrot/tools/gvoice.py +695 -0
- parrot/tools/ibisworld/README.md +225 -0
- parrot/tools/ibisworld/__init__.py +11 -0
- parrot/tools/ibisworld/tool.py +366 -0
- parrot/tools/jiratoolkit.py +1718 -0
- parrot/tools/manager.py +1098 -0
- parrot/tools/math.py +152 -0
- parrot/tools/metadata.py +476 -0
- parrot/tools/msteams.py +1621 -0
- parrot/tools/msword.py +635 -0
- parrot/tools/multidb.py +580 -0
- parrot/tools/multistoresearch.py +369 -0
- parrot/tools/networkninja.py +167 -0
- parrot/tools/nextstop/__init__.py +4 -0
- parrot/tools/nextstop/base.py +286 -0
- parrot/tools/nextstop/employee.py +733 -0
- parrot/tools/nextstop/store.py +462 -0
- parrot/tools/notification.py +435 -0
- parrot/tools/o365/__init__.py +42 -0
- parrot/tools/o365/base.py +295 -0
- parrot/tools/o365/bundle.py +522 -0
- parrot/tools/o365/events.py +554 -0
- parrot/tools/o365/mail.py +992 -0
- parrot/tools/o365/onedrive.py +497 -0
- parrot/tools/o365/sharepoint.py +641 -0
- parrot/tools/openapi_toolkit.py +904 -0
- parrot/tools/openweather.py +527 -0
- parrot/tools/pdfprint.py +1001 -0
- parrot/tools/powerbi.py +518 -0
- parrot/tools/powerpoint.py +1113 -0
- parrot/tools/pricestool.py +146 -0
- parrot/tools/products/__init__.py +246 -0
- parrot/tools/prophet_tool.py +171 -0
- parrot/tools/pythonpandas.py +630 -0
- parrot/tools/pythonrepl.py +910 -0
- parrot/tools/qsource.py +436 -0
- parrot/tools/querytoolkit.py +395 -0
- parrot/tools/quickeda.py +827 -0
- parrot/tools/resttool.py +553 -0
- parrot/tools/retail/__init__.py +0 -0
- parrot/tools/retail/bby.py +528 -0
- parrot/tools/sandboxtool.py +703 -0
- parrot/tools/sassie/__init__.py +352 -0
- parrot/tools/scraping/__init__.py +7 -0
- parrot/tools/scraping/docs/select.md +466 -0
- parrot/tools/scraping/documentation.md +1278 -0
- parrot/tools/scraping/driver.py +436 -0
- parrot/tools/scraping/models.py +576 -0
- parrot/tools/scraping/options.py +85 -0
- parrot/tools/scraping/orchestrator.py +517 -0
- parrot/tools/scraping/readme.md +740 -0
- parrot/tools/scraping/tool.py +3115 -0
- parrot/tools/seasonaldetection.py +642 -0
- parrot/tools/shell_tool/__init__.py +5 -0
- parrot/tools/shell_tool/actions.py +408 -0
- parrot/tools/shell_tool/engine.py +155 -0
- parrot/tools/shell_tool/models.py +322 -0
- parrot/tools/shell_tool/tool.py +442 -0
- parrot/tools/site_search.py +214 -0
- parrot/tools/textfile.py +418 -0
- parrot/tools/think.py +378 -0
- parrot/tools/toolkit.py +298 -0
- parrot/tools/webapp_tool.py +187 -0
- parrot/tools/whatif.py +1279 -0
- parrot/tools/workday/MULTI_WSDL_EXAMPLE.md +249 -0
- parrot/tools/workday/__init__.py +6 -0
- parrot/tools/workday/models.py +1389 -0
- parrot/tools/workday/tool.py +1293 -0
- parrot/tools/yfinance_tool.py +306 -0
- parrot/tools/zipcode.py +217 -0
- parrot/utils/__init__.py +2 -0
- parrot/utils/helpers.py +73 -0
- parrot/utils/parsers/__init__.py +5 -0
- parrot/utils/parsers/toml.c +12078 -0
- parrot/utils/parsers/toml.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/parsers/toml.pyx +21 -0
- parrot/utils/toml.py +11 -0
- parrot/utils/types.cpp +20936 -0
- parrot/utils/types.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/types.pyx +213 -0
- parrot/utils/uv.py +11 -0
- parrot/version.py +10 -0
- parrot/yaml-rs/Cargo.lock +350 -0
- parrot/yaml-rs/Cargo.toml +19 -0
- parrot/yaml-rs/pyproject.toml +19 -0
- parrot/yaml-rs/python/yaml_rs/__init__.py +81 -0
- parrot/yaml-rs/src/lib.rs +222 -0
- requirements/docker-compose.yml +24 -0
- requirements/requirements-dev.txt +21 -0
parrot/clients/groq.py
ADDED
|
@@ -0,0 +1,986 @@
|
|
|
1
|
+
import traceback
|
|
2
|
+
from typing import AsyncIterator, List, Optional, Union, Any
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from logging import getLogger
|
|
5
|
+
import uuid
|
|
6
|
+
import time
|
|
7
|
+
import json
|
|
8
|
+
from dataclasses import is_dataclass
|
|
9
|
+
from pydantic import BaseModel, TypeAdapter
|
|
10
|
+
from datamodel.parsers.json import json_decoder # pylint: disable=E0611 # noqa
|
|
11
|
+
from navconfig import config
|
|
12
|
+
from groq import AsyncGroq
|
|
13
|
+
from .base import AbstractClient
|
|
14
|
+
from ..models import (
|
|
15
|
+
AIMessage,
|
|
16
|
+
AIMessageFactory,
|
|
17
|
+
ToolCall,
|
|
18
|
+
StructuredOutputConfig,
|
|
19
|
+
OutputFormat,
|
|
20
|
+
)
|
|
21
|
+
from ..models.groq import GroqModel
|
|
22
|
+
from ..models.outputs import (
|
|
23
|
+
SentimentAnalysis,
|
|
24
|
+
ProductReview
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# Silence the chatty HTTP transport layers used by the Groq SDK; keep the
# SDK's own logger at INFO for request-level visibility.
getLogger('httpx').setLevel('WARNING')
getLogger('httpcore').setLevel('WARNING')
getLogger('groq').setLevel('INFO')


# Models known to support Groq's strict JSON-schema structured output mode.
# NOTE(review): membership is maintained by hand — confirm against Groq's
# current model list when new models are added to ``GroqModel``.
STRUCTURED_OUTPUT_COMPATIBLE_MODELS = {
    GroqModel.LLAMA_4_SCOUT_17B.value,
    GroqModel.LLAMA_4_MAVERICK_17B.value,
    GroqModel.KIMI_K2_INSTRUCT.value,
    GroqModel.OPENAI_GPT_OSS_SAFEGUARD_20B.value,
    GroqModel.OPENAI_GPT_OSS_20B.value,
    GroqModel.OPENAI_GPT_OSS_120B.value,
}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class GroqClient(AbstractClient):
    """Client for interacting with Groq's API.

    Note: Groq has a limitation where structured output (JSON mode) cannot be
    combined with tool calling in the same request. When both are requested,
    this client handles tools first, then makes a separate request for
    structured output formatting.

    """

    # Identifiers used by the client factory / registry to select this client.
    client_type: str = "groq"
    client_name: str = "groq"
    # Default model for requests. NOTE(review): annotated ``str`` but assigned
    # a ``GroqModel`` enum member — callers appear to normalize via ``.value``
    # (see ``ask``); confirm whether the annotation should be ``GroqModel | str``.
    model: str = GroqModel.LLAMA_3_3_70B_VERSATILE
    # Fallback model identifier used when no explicit model is provided.
    _default_model: str = 'openai/gpt-oss-120b'
|
|
57
|
+
|
|
58
|
+
    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: str = "https://api.groq.com/openai/v1",
        **kwargs
    ):
        """Configure credentials and endpoint for the Groq API.

        Args:
            api_key: Groq API key; falls back to the ``GROQ_API_KEY``
                configuration value when not supplied.
            base_url: Base endpoint for Groq's OpenAI-compatible API.
            **kwargs: Forwarded unchanged to ``AbstractClient.__init__``.
        """
        self.api_key = api_key or config.get('GROQ_API_KEY')
        self.base_url = base_url
        # Headers for any raw HTTP calls; must be built after ``self.api_key``
        # is resolved so the bearer token is the effective key.
        self.base_headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        super().__init__(**kwargs)
|
|
71
|
+
|
|
72
|
+
async def get_client(self) -> AsyncGroq:
|
|
73
|
+
"""Initialize the Groq client."""
|
|
74
|
+
return AsyncGroq(api_key=self.api_key)
|
|
75
|
+
|
|
76
|
+
    def _fix_schema_for_groq(self, schema: dict) -> dict:
        """
        Fix JSON schema to comply with Groq's structured-output validator.

        - Start from the OpenAI-normalized schema (handles additionalProperties, required, etc.).
        - Resolve Groq's ambiguity with integer vs number:
            anyOf: [{"type": "integer"}, {"type": "number"}, ...] -> drop "integer".
        - Drop some scalar constraints Groq doesn't care about.

        Note: the Optional[T] collapse (anyOf: [T, {"type": "null"}] -> T) is
        deliberately disabled below — stripping nullability caused validation
        errors when the model legitimately returned null.

        Args:
            schema: A JSON schema dict (e.g. from ``model_json_schema()``).

        Returns:
            A new schema dict normalized for Groq; mutation happens on a
            shallow copy of the top level, nested nodes are mutated in place.
        """
        # First apply your generic OpenAI-style normalization
        schema = self._oai_normalize_schema(schema, force_required_all=False)

        # Validation keywords Groq's validator rejects or ignores; removed
        # everywhere in the tree.
        unsupported_constraints = [
            "minimum", "maximum", "exclusiveMinimum", "exclusiveMaximum",
            "minLength", "maxLength", "pattern", "format",
            "minItems", "maxItems", "uniqueItems",
            "minProperties", "maxProperties",
        ]

        def visit(node):
            # Depth-first, mutating walk over every schema node.
            if isinstance(node, dict):
                # Drop constraints Groq doesn't care about
                for c in unsupported_constraints:
                    node.pop(c, None)

                # --- Handle anyOf ---
                if "anyOf" in node and isinstance(node["anyOf"], list):
                    variants = node["anyOf"]

                    # 1) Fix integer/number overlap for Groq
                    type_variants = [
                        v.get("type") for v in variants
                        if isinstance(v, dict) and "type" in v
                    ]
                    if "number" in type_variants and "integer" in type_variants:
                        new_variants = []
                        for v in variants:
                            if isinstance(v, dict) and v.get("type") == "integer":
                                # drop integer variant when number is also present
                                continue
                            new_variants.append(v)
                        variants = new_variants
                        node["anyOf"] = variants

                    # 2) Collapse Optional[T] pattern: anyOf: [T, {"type": "null"}]
                    # COMMENTED OUT: This removes nullability which causes validation errors when model returns null
                    # non_null = [
                    #     v for v in variants
                    #     if not (isinstance(v, dict) and v.get("type") == "null")
                    # ]
                    # nulls = [
                    #     v for v in variants
                    #     if isinstance(v, dict) and v.get("type") == "null"
                    # ]

                    # if len(non_null) == 1 and len(nulls) >= 1:
                    #     base = visit(non_null[0])  # recurse into T
                    #
                    #     # Preserve metadata from wrapper (title, default, description...)
                    #     for k, v in list(node.items()):
                    #         if k == "anyOf":
                    #             continue
                    #         base.setdefault(k, v)
                    #
                    #     node.clear()
                    #     node.update(base)
                    # else:
                    #     # Just recurse into each variant
                    #     node["anyOf"] = [visit(v) for v in variants]

                    # Original logic above was stripping NULL from AnyOf.
                    # We simply recurse now.
                    node["anyOf"] = [visit(v) for v in variants]

                # Recurse into object properties / patternProperties
                for key in ("properties", "patternProperties"):
                    if key in node and isinstance(node[key], dict):
                        for k, v in list(node[key].items()):
                            node[key][k] = visit(v)

                # Recurse into items (array element schemas)
                if "items" in node and isinstance(node["items"], (dict, list)):
                    node["items"] = visit(node["items"])

                # Recurse into combinators other than anyOf
                for key in ("allOf", "oneOf"):
                    if key in node and isinstance(node[key], list):
                        node[key] = [visit(v) for v in node[key]]

                # IMPORTANT: recurse into $defs / definitions — referenced
                # sub-schemas must be normalized too, or Groq rejects the
                # schema even though the top level looks clean.
                for key in ("$defs", "definitions"):
                    if key in node and isinstance(node[key], dict):
                        for k, v in list(node[key].items()):
                            node[key][k] = visit(v)

            elif isinstance(node, list):
                return [visit(v) for v in node]

            # Dicts (mutated in place) and scalars fall through unchanged.
            return node

        return visit(dict(schema))
|
|
180
|
+
|
|
181
|
+
def _prepare_groq_tools(self) -> List[dict]:
|
|
182
|
+
groq_tools = []
|
|
183
|
+
for tool in self.tool_manager.all_tools():
|
|
184
|
+
tool_name = tool.name if hasattr(tool, "name") else tool.__class__.__name__
|
|
185
|
+
print(f":::: Preparing tool: {tool_name}")
|
|
186
|
+
|
|
187
|
+
# 1) get a *parameter* schema, not a full tool descriptor
|
|
188
|
+
if hasattr(tool, "input_schema") and tool.input_schema:
|
|
189
|
+
param_schema = tool.input_schema
|
|
190
|
+
elif hasattr(tool, "get_tool_schema"):
|
|
191
|
+
full = tool.get_tool_schema()
|
|
192
|
+
param_schema = full.get("parameters", full)
|
|
193
|
+
else:
|
|
194
|
+
param_schema = {
|
|
195
|
+
"type": "object",
|
|
196
|
+
"properties": {},
|
|
197
|
+
"additionalProperties": False,
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
# 2) normalize for Groq
|
|
201
|
+
fixed_schema = self._fix_schema_for_groq(param_schema)
|
|
202
|
+
|
|
203
|
+
groq_tools.append({
|
|
204
|
+
"type": "function",
|
|
205
|
+
"function": {
|
|
206
|
+
"name": tool_name,
|
|
207
|
+
"description": getattr(tool, "description", "") or "",
|
|
208
|
+
"parameters": fixed_schema
|
|
209
|
+
}
|
|
210
|
+
})
|
|
211
|
+
return groq_tools
|
|
212
|
+
|
|
213
|
+
def _prepare_structured_output_format(self, structured_output: type) -> dict:
|
|
214
|
+
if not structured_output:
|
|
215
|
+
return {}
|
|
216
|
+
|
|
217
|
+
# Normalize instance → class
|
|
218
|
+
if isinstance(structured_output, BaseModel):
|
|
219
|
+
structured_output = structured_output.__class__
|
|
220
|
+
if is_dataclass(structured_output) and not isinstance(structured_output, type):
|
|
221
|
+
structured_output = structured_output.__class__
|
|
222
|
+
|
|
223
|
+
# Pydantic models
|
|
224
|
+
if isinstance(structured_output, type) and hasattr(structured_output, 'model_json_schema'):
|
|
225
|
+
schema = structured_output.model_json_schema()
|
|
226
|
+
fixed_schema = self._fix_schema_for_groq(schema)
|
|
227
|
+
return {
|
|
228
|
+
"response_format": {
|
|
229
|
+
"type": "json_schema",
|
|
230
|
+
"json_schema": {
|
|
231
|
+
"name": structured_output.__name__.lower(),
|
|
232
|
+
"schema": fixed_schema,
|
|
233
|
+
"strict": True
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
# Dataclasses
|
|
239
|
+
if is_dataclass(structured_output):
|
|
240
|
+
schema = TypeAdapter(structured_output).json_schema()
|
|
241
|
+
fixed_schema = self._fix_schema_for_groq(schema)
|
|
242
|
+
return {
|
|
243
|
+
"response_format": {
|
|
244
|
+
"type": "json_schema",
|
|
245
|
+
"json_schema": {
|
|
246
|
+
"name": structured_output.__name__.lower(),
|
|
247
|
+
"schema": fixed_schema,
|
|
248
|
+
"strict": True
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
# Fallback
|
|
254
|
+
return {"response_format": {"type": "json_object"}}
|
|
255
|
+
|
|
256
|
+
async def ask(
    self,
    prompt: str,
    model: str = GroqModel.LLAMA_3_3_70B_VERSATILE,
    max_tokens: int = 4096,
    temperature: float = 0.1,
    top_p: float = 0.9,
    files: Optional[List[Union[str, Path]]] = None,
    system_prompt: Optional[str] = None,
    structured_output: Union[type, StructuredOutputConfig, None] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    tools: Optional[List[dict]] = None,
    use_tools: Optional[bool] = None,
    use_code_interpreter: Optional[bool] = None
) -> AIMessage:
    """Ask Groq a question with optional conversation memory.

    Orchestrates a full chat turn: conversation-context preparation, an
    initial completion request, an optional tool-execution loop, an optional
    follow-up request for structured output (Groq cannot combine tools and
    structured output in one request), memory update, and AIMessage assembly.

    Args:
        prompt: The user's message for this turn.
        model: Groq model name (``GroqModel`` member or raw string).
        max_tokens / temperature / top_p: Sampling parameters passed through.
        files: Optional attachments handed to ``_prepare_conversation_context``.
        system_prompt: Optional system message; inserted at position 0.
        structured_output: Type or ``StructuredOutputConfig`` describing the
            desired JSON result, or ``None`` for free text.
        user_id / session_id: Identifiers for conversation memory.
        tools: Extra tool specs registered via ``register_tool`` before use.
        use_tools: Overrides ``self.enable_tools`` when not ``None``.
        use_code_interpreter: Enables built-in browser/code tools for the
            ``openai/gpt-oss-*`` models when normal tools are off.

    Returns:
        AIMessage: Built by ``AIMessageFactory.from_groq`` with any executed
        tool calls attached on ``.tool_calls``.
    """
    model = model.value if isinstance(model, GroqModel) else model
    # Generate unique turn ID for tracking
    turn_id = str(uuid.uuid4())
    original_prompt = prompt
    _use_tools = use_tools if use_tools is not None else self.enable_tools

    messages, conversation_session, system_prompt = await self._prepare_conversation_context(
        prompt, files, user_id, session_id, system_prompt
    )

    if system_prompt:
        messages.insert(0, {"role": "system", "content": system_prompt})

    # Prepare tools: any explicitly passed specs are registered first, then
    # the full registered set is converted to Groq's tool format.
    if tools and isinstance(tools, list):
        for tool in tools:
            self.register_tool(tool)
    if _use_tools:
        tools = self._prepare_groq_tools()
    else:
        tools = None

    # Groq doesn't support combining structured output with tools
    # Priority: tools first, then structured output in separate request if needed
    output_config = self._get_structured_config(
        structured_output
    )
    use_tools = _use_tools
    use_structured_output = bool(output_config)

    structured_output_for_later: Optional[StructuredOutputConfig] = None
    request_output_config: Optional[StructuredOutputConfig] = output_config

    # NEW: per-request flag
    request_use_structured_output = bool(request_output_config)

    if use_structured_output and model not in STRUCTURED_OUTPUT_COMPATIBLE_MODELS:
        self.logger.error(
            f"The model '{model}' does not support structured output. "
            "Please choose a compatible model."
        )
        # Silently fall back to a structured-output-capable model.
        model = GroqModel.LLAMA_4_SCOUT_17B.value

    if use_tools and request_use_structured_output:
        # Handle tools first, structured output later
        structured_output_for_later = output_config
        request_output_config = None
        request_use_structured_output = False  # IMPORTANT

    # Track tool calls for the response
    all_tool_calls = []

    # Prepare request arguments
    request_args = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": False
    }

    if use_tools and not request_use_structured_output:
        request_args["tool_choice"] = "auto"
        request_args["tools"] = tools or []
        # NOTE(review): `model` is a plain str here, while
        # getattr(GroqModel, "GEMMA2_9B_IT", ...) yields an enum member (or a
        # fallback string). The first comparison is likely always True; only
        # the second (string fallback) can match. Confirm intended semantics.
        if model != getattr(GroqModel, "GEMMA2_9B_IT", None) and \
                model != getattr(GroqModel, "GEMMA2_9B_IT", "google/gemma-2-9b-it"):
            request_args["parallel_tool_calls"] = True
    elif use_code_interpreter:
        # Built-in server-side tools are only available on these models.
        if model in ("openai/gpt-oss-20b", "openai/gpt-oss-120b"):
            request_args["tool_choice"] = "required"
            request_args["tools"] = [
                {
                    "type": "browser_search"
                },
                {
                    "type": "code_interpreter"
                }
            ]

    # Add structured output format if no tools
    if request_output_config and not use_tools:
        self._ensure_json_instruction(
            messages,
            "Please respond with a valid JSON object that matches the requested schema."
        )
        if request_output_config.format == OutputFormat.JSON:
            if output_type := request_output_config.output_type:
                request_args.update(
                    self._prepare_structured_output_format(output_type)
                )
            else:
                request_args["response_format"] = {"type": "json_object"}

    # Make initial request
    self.logger.debug(
        f"Groq request: use_tools={use_tools}, "
        f"request_output_config={'yes' if request_output_config else 'no'}, "
        f"tools_in_request={'tools' in request_args}"
    )
    response = await self.client.chat.completions.create(**request_args)
    result = response.choices[0].message

    # Handle tool calls in a loop (only if tools were enabled)
    if use_tools:
        # Keep track of conversation turns
        conversation_turns = 0
        max_turns = 10  # Prevent infinite loops

        while result.tool_calls and conversation_turns < max_turns:
            conversation_turns += 1

            # Add the assistant's message with tool calls to conversation
            messages.append({
                "role": "assistant",
                "content": result.content or "",
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments
                        }
                    } for tc in result.tool_calls
                ]
            })

            # Execute each tool call
            for tool_call in result.tool_calls:
                tool_name = tool_call.function.name
                try:
                    tool_args = self._json.loads(tool_call.function.arguments)
                except json.JSONDecodeError:
                    # Fall back to the lenient project decoder for malformed JSON.
                    tool_args = json_decoder(tool_call.function.arguments)

                # Create ToolCall object and execute
                tc = ToolCall(
                    id=tool_call.id,
                    name=tool_name,
                    arguments=tool_args
                )

                try:
                    start_time = time.time()
                    tool_result = await self._execute_tool(tool_name, tool_args)
                    execution_time = time.time() - start_time
                    tc.result = tool_result
                    tc.execution_time = execution_time

                    # Add tool result to conversation
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": tool_name,
                        "content": str(tool_result)
                    })
                except Exception as e:
                    # Tool failures are surfaced to the model, not raised.
                    tc.error = str(e)
                    trace = traceback.format_exc()
                    # Add error to conversation
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": tool_name,
                        "content": f"Error: {str(e)}",
                        "traceback": trace
                    })

                all_tool_calls.append(tc)

            # Continue conversation with tool results to get final response
            continue_args = {
                "model": model,
                "messages": messages,
                "max_tokens": max_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "stream": False,
            }

            # Only add tools if we want to allow further tool calls
            # For final response, we might want to remove tools to force a text response
            if conversation_turns < max_turns - 1:  # Allow more tool calls if not at limit
                continue_args["tools"] = tools
                continue_args["tool_choice"] = "auto"
                # NOTE(review): str-vs-enum comparison — likely always True
                # (see the same pattern above); confirm intended semantics.
                if model != GroqModel.GEMMA2_9B_IT:
                    continue_args["parallel_tool_calls"] = True
            else:
                # Force final response without more tool calls
                continue_args["tool_choice"] = "none"

            response = await self.client.chat.completions.create(**continue_args)
            result = response.choices[0].message

    # Handle structured output after tools if needed
    final_output = None
    parsed_config: Optional[StructuredOutputConfig] = None
    if structured_output_for_later and use_tools:
        # Add the final tool response to messages
        if result.content:
            messages.append({
                "role": "assistant",
                "content": result.content
            })

        # Make a new request for structured output
        json_followup_instruction = (
            "Please format the above response as valid JSON that matches the requested structure."
        )
        messages.append({
            "role": "user",
            "content": [{"type": "text", "text": json_followup_instruction}]
        })

        self._ensure_json_instruction(messages, json_followup_instruction)

        structured_args = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": False
        }

        if structured_output_for_later.format == OutputFormat.JSON:
            output_type = structured_output_for_later.output_type
            if output_type:
                structured_args.update(
                    self._prepare_structured_output_format(output_type)
                )
            else:
                structured_args["response_format"] = {"type": "json_object"}

        structured_response = await self.client.chat.completions.create(**structured_args)
        # Defensive: handle both a bare-message and a choices-style response.
        result = structured_response.message if hasattr(
            structured_response,
            'message'
        ) else structured_response.choices[0].message

        parsed_config = structured_output_for_later
    else:
        parsed_config = request_output_config

    response_text = result.content if isinstance(result.content, str) else self._json.dumps(result.content)
    if parsed_config:
        try:
            final_output = await self._parse_structured_output(
                response_text,
                parsed_config
            )
        except Exception:  # pylint: disable=broad-except
            # Parsing failure degrades gracefully to the raw text.
            final_output = response_text
    else:
        final_output = result.content

    # Add final assistant message to conversation (if not already added)
    if not (use_tools and result.content):  # Only add if we haven't already added it in tool handling
        messages.append({
            "role": "assistant",
            "content": result.content or ""
        })

    # Update conversation memory
    tools_used = [tc.name for tc in all_tool_calls]
    assistant_response_text = result.content if isinstance(
        result.content, str) else self._json.dumps(result.content)
    await self._update_conversation_memory(
        user_id,
        session_id,
        conversation_session,
        messages,
        system_prompt,
        turn_id,
        original_prompt,
        assistant_response_text,
        tools_used
    )

    # Create AIMessage using factory
    # Only attach a structured payload when parsing actually changed the text.
    structured_payload = None
    if parsed_config and final_output is not None and not (
        isinstance(final_output, str) and final_output == response_text
    ):
        structured_payload = final_output

    # NOTE(review): `response` here is the last *tool-loop* response, not
    # `structured_response` — confirm the factory should not see the
    # structured follow-up response's metadata.
    ai_message = AIMessageFactory.from_groq(
        response=response,
        input_text=original_prompt,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=structured_payload
    )

    # Add tool calls to the response
    ai_message.tool_calls = all_tool_calls

    return ai_message
async def ask_stream(
    self,
    prompt: str,
    model: str = GroqModel.LLAMA_3_3_70B_VERSATILE,
    max_tokens: int = 4096,
    temperature: float = 0.1,
    top_p: float = 0.9,
    files: Optional[List[Union[str, Path]]] = None,
    system_prompt: Optional[str] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    tools: Optional[List[dict]] = None
) -> AsyncIterator[str]:
    """Stream Groq's response with optional conversation memory.

    Yields text chunks as they arrive from the streaming API. After the
    stream completes, the accumulated assistant text is appended to the
    conversation and persisted via ``_update_conversation_memory``.

    Args:
        prompt: The user's message for this turn.
        model: Groq model name (``GroqModel`` member or raw string).
        max_tokens / temperature / top_p: Sampling parameters passed through.
        files: Optional attachments for context preparation.
        system_prompt: Optional system message; inserted at position 0.
        user_id / session_id: Identifiers for conversation memory.
        tools: Optional tool specs; registered and attached to the request,
            but tool *calls* are not executed in this streaming path.

    Yields:
        str: Incremental content deltas from the model.
    """

    # Generate unique turn ID for tracking
    turn_id = str(uuid.uuid4())
    model = model.value if isinstance(model, GroqModel) else model

    messages, conversation_session, system_prompt = await self._prepare_conversation_context(
        prompt, files, user_id, session_id, system_prompt
    )

    if system_prompt:
        messages.insert(0, {"role": "system", "content": system_prompt})

    # Prepare request arguments
    request_args = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": True
    }

    # Note: For streaming, we don't handle tools in this version
    # You might want to implement a more sophisticated streaming + tools handler
    # NOTE(review): unlike ask(), registered tools are only attached when an
    # explicit `tools` list is passed — confirm this asymmetry is intended.
    if tools and isinstance(tools, list):
        for tool in tools:
            self.register_tool(tool)
        # Prepare tools for the request
        tools = self._prepare_groq_tools()
        if tools:
            request_args["tools"] = tools
            request_args["tool_choice"] = "auto"

    response_stream = await self.client.chat.completions.create(**request_args)

    assistant_content = ""
    async for chunk in response_stream:
        # Skip keep-alive / empty-delta chunks.
        if chunk.choices and chunk.choices[0].delta.content:
            text_chunk = chunk.choices[0].delta.content
            assistant_content += text_chunk
            yield text_chunk

    # Update conversation memory if content was generated
    if assistant_content:
        messages.append({
            "role": "assistant",
            "content": assistant_content
        })
        # Update conversation memory
        tools_used = []
        await self._update_conversation_memory(
            user_id,
            session_id,
            conversation_session,
            messages,
            system_prompt,
            turn_id,
            prompt,
            assistant_content,
            tools_used
        )
async def batch_ask(self, requests):
    """Process multiple requests in batch.

    Thin pass-through to the base-class batch implementation; kept here so
    the Groq client exposes the same surface as its siblings.
    """
    batch_results = await super().batch_ask(requests)
    return batch_results
async def summarize_text(
    self,
    text: str,
    model: str = GroqModel.LLAMA_3_3_70B_VERSATILE,
    max_tokens: int = 1024,
    temperature: float = 0.1,
    system_prompt: Optional[str] = None,
    top_p: float = 0.9,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None
) -> AIMessage:
    """Summarize the given text using Groq API.

    Args:
        text (str): The text to be summarized.
        model (str): The Groq model to use.
        max_tokens (int): Maximum tokens for the response.
        temperature (float): Sampling temperature.
        system_prompt (Optional[str]): Override for the default
            summarization instruction.
        top_p (float): Top-p sampling.
        user_id (Optional[str]): Optional user identifier for memory tracking.
        session_id (Optional[str]): Optional session identifier for memory tracking.

    Returns:
        AIMessage: Factory-built message whose ``structured_output`` carries
        the full summary text.
    """
    # Generate unique turn ID for tracking
    turn_id = str(uuid.uuid4())
    original_prompt = text
    model = model.value if isinstance(model, GroqModel) else model

    system_prompt = system_prompt or "Summarize the following text:"

    messages, conversation_session, system_prompt = await self._prepare_conversation_context(
        original_prompt, None, user_id, session_id, system_prompt
    )

    if system_prompt:
        messages.insert(0, {"role": "system", "content": system_prompt})

    # Prepare request arguments
    # NOTE(review): the request sends a fresh two-message list, not the
    # prepared `messages` history above — `messages` is only used for the
    # memory update. Confirm this is intentional.
    request_args = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text},
        ],
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": False,
    }

    # Make request to Groq API
    response = await self.client.chat.completions.create(**request_args)
    result = response.choices[0].message

    # Extract summarized text
    summarized_text = result.content

    # Add final assistant message to conversation
    messages.append({
        "role": "assistant",
        "content": result.content
    })

    # Update conversation memory
    tools_used = []
    # return only 100 characters of the summarized text
    assistant_content = summarized_text[:100] if summarized_text else ""
    await self._update_conversation_memory(
        user_id,
        session_id,
        conversation_session,
        messages,
        system_prompt,
        turn_id,
        'summarization',
        assistant_content,
        tools_used
    )

    # Create AIMessage using factory
    ai_message = AIMessageFactory.from_groq(
        response=response,
        input_text=original_prompt,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=summarized_text
    )

    return ai_message
async def analyze_sentiment(
    self,
    text: str,
    model: Union[GroqModel, str] = GroqModel.KIMI_K2_INSTRUCT,
    temperature: Optional[float] = 0.1,
    max_tokens: int = 1024,
    top_p: float = 0.9,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    use_structured: bool = False
) -> AIMessage:
    """
    Analyze the sentiment of a given text.

    Args:
        text (str): The text content to analyze.
        model (Union[GroqModel, str]): The model to use.
        temperature (float): Sampling temperature for response generation.
        max_tokens (int): Maximum tokens in the response.
        top_p (float): Top-p sampling parameter.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.
        use_structured (bool): When True, request a strict JSON response
            matching the ``SentimentAnalysis`` schema and parse it.

    Returns:
        AIMessage: ``structured_output`` carries the parsed
        ``SentimentAnalysis`` when parsing succeeds, else the raw text.

    Raises:
        RuntimeError: If the client session has not been initialized.
    """
    if not self.session:
        raise RuntimeError("Client not initialized. Use async context manager.")

    model = model.value if isinstance(model, GroqModel) else model

    turn_id = str(uuid.uuid4())
    original_prompt = text

    if use_structured:
        system_prompt = (
            "You are a sentiment analysis expert. Analyze the sentiment of the given text "
            "and respond with structured data including sentiment classification, "
            "confidence level, emotional indicators, and reasoning."
        )
    else:
        system_prompt = """
        Analyze the sentiment of the following text and provide a structured response.
        Your response should include:
        1. Overall sentiment (Positive, Negative, Neutral, or Mixed)
        2. Confidence level (High, Medium, Low)
        3. Key emotional indicators found in the text
        4. Brief explanation of your analysis
        Format your response clearly with these sections.
        """

    messages, conversation_session, system_prompt = await self._prepare_conversation_context(
        original_prompt, None, user_id, session_id, system_prompt
    )

    if system_prompt:
        messages.insert(0, {"role": "system", "content": system_prompt})

    # Prepare request arguments
    # NOTE(review): as in summarize_text, the request sends a fresh
    # two-message list; the prepared `messages` history is only used for
    # the memory update. Confirm this is intentional.
    request_args = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text},
        ],
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": False,
    }

    # Add structured output if requested
    structured_output = None
    if use_structured:
        request_args.update(
            self._prepare_structured_output_format(SentimentAnalysis)
        )
        structured_output = SentimentAnalysis

    # Make request to Groq API
    response = await self.client.chat.completions.create(**request_args)
    result = response.choices[0].message

    # Extract sentiment analysis result
    sentiment_result = result.content

    # Add final assistant message to conversation
    messages.append({
        "role": "assistant",
        "content": result.content
    })

    # Handle structured output
    final_output = None
    if structured_output:
        # Prepare structured output configuration
        output_config = self._get_structured_config(structured_output)
        try:
            final_output = await self._parse_structured_output(
                result.content,
                output_config
            )
        except Exception:
            # Parsing failure degrades gracefully to the raw text.
            final_output = result.content

    # Update conversation memory
    tools_used = []
    # return only 100 characters of the sentiment analysis result
    assistant_content = sentiment_result[:100] if sentiment_result else ""
    await self._update_conversation_memory(
        user_id,
        session_id,
        conversation_session,
        messages,
        system_prompt,
        turn_id,
        'sentiment_analysis',
        assistant_content,
        tools_used
    )

    # Create AIMessage using factory
    ai_message = AIMessageFactory.from_groq(
        response=response,
        input_text=original_prompt,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=final_output if final_output is not None else sentiment_result,
    )

    return ai_message
async def analyze_product_review(
    self,
    review_text: str,
    product_id: str,
    product_name: str,
    model: Union[GroqModel, str] = GroqModel.KIMI_K2_INSTRUCT,
    temperature: Optional[float] = 0.1,
    max_tokens: int = 1024,
    top_p: float = 0.9,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
) -> AIMessage:
    """
    Analyze a product review and extract structured information.

    Always requests a strict JSON response matching the ``ProductReview``
    schema and attempts to parse it; on parse failure the raw text is used.

    Args:
        review_text (str): The product review text to analyze.
        product_id (str): Unique identifier for the product.
        product_name (str): Name of the product being reviewed.
        model (Union[GroqModel, str]): The model to use.
        temperature (float): Sampling temperature for response generation.
        max_tokens (int): Maximum tokens in response.
        top_p (float): Top-p sampling parameter.
        user_id (Optional[str]): Optional user identifier for tracking.
        session_id (Optional[str]): Optional session identifier for tracking.

    Returns:
        AIMessage: ``structured_output`` carries the parsed ``ProductReview``
        when parsing succeeds, else the raw response text.

    Raises:
        RuntimeError: If the client session has not been initialized.
    """
    if not self.session:
        raise RuntimeError("Client not initialized. Use async context manager.")

    turn_id = str(uuid.uuid4())
    original_prompt = review_text
    model = model.value if isinstance(model, GroqModel) else model

    system_prompt = (
        f"You are a product review analysis expert. Analyze the given product review "
        f"for '{product_name}' (ID: {product_id}) and extract structured information "
        f"including sentiment, rating, and key features mentioned in the review."
    )

    messages, conversation_session, system_prompt = await self._prepare_conversation_context(
        original_prompt, None, user_id, session_id, system_prompt
    )

    if system_prompt:
        messages.insert(0, {"role": "system", "content": system_prompt})

    # Prepare request arguments with structured output
    # NOTE(review): as in the sibling helpers, the request sends a fresh
    # two-message list; the prepared `messages` history is only used for
    # the memory update. Confirm this is intentional.
    request_args = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"Product ID: {product_id}\nProduct Name: {product_name}\nReview: {review_text}"},
        ],
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "stream": False,
    }

    # Add structured output format
    request_args.update(self._prepare_structured_output_format(ProductReview))

    # Make request to Groq API
    response = await self.client.chat.completions.create(**request_args)
    result = response.choices[0].message

    # Add final assistant message to conversation
    messages.append({
        "role": "assistant",
        "content": result.content
    })

    # Update conversation memory
    tools_used = []
    assistant_content = result.content[:100] if result.content else ""
    await self._update_conversation_memory(
        user_id,
        session_id,
        conversation_session,
        messages,
        system_prompt,
        turn_id,
        'product_review_analysis',
        assistant_content,
        tools_used
    )
    # Handle structured output (after memory update; memory stores raw text)
    final_output = None
    # Prepare structured output configuration
    output_config = self._get_structured_config(ProductReview)
    try:
        final_output = await self._parse_structured_output(
            result.content,
            output_config
        )
    except Exception:
        # Parsing failure degrades gracefully to the raw text.
        final_output = result.content

    # Create AIMessage using factory
    ai_message = AIMessageFactory.from_groq(
        response=response,
        input_text=original_prompt,
        model=model,
        user_id=user_id,
        session_id=session_id,
        turn_id=turn_id,
        structured_output=final_output if final_output is not None else result.content,
    )

    return ai_message