ai-parrot 0.17.2__cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentui/.prettierrc +15 -0
- agentui/QUICKSTART.md +272 -0
- agentui/README.md +59 -0
- agentui/env.example +16 -0
- agentui/jsconfig.json +14 -0
- agentui/package-lock.json +4242 -0
- agentui/package.json +34 -0
- agentui/scripts/postinstall/apply-patches.mjs +260 -0
- agentui/src/app.css +61 -0
- agentui/src/app.d.ts +13 -0
- agentui/src/app.html +12 -0
- agentui/src/components/LoadingSpinner.svelte +64 -0
- agentui/src/components/ThemeSwitcher.svelte +159 -0
- agentui/src/components/index.js +4 -0
- agentui/src/lib/api/bots.ts +60 -0
- agentui/src/lib/api/chat.ts +22 -0
- agentui/src/lib/api/http.ts +25 -0
- agentui/src/lib/components/BotCard.svelte +33 -0
- agentui/src/lib/components/ChatBubble.svelte +63 -0
- agentui/src/lib/components/Toast.svelte +21 -0
- agentui/src/lib/config.ts +20 -0
- agentui/src/lib/stores/auth.svelte.ts +73 -0
- agentui/src/lib/stores/theme.svelte.js +64 -0
- agentui/src/lib/stores/toast.svelte.ts +31 -0
- agentui/src/lib/utils/conversation.ts +39 -0
- agentui/src/routes/+layout.svelte +20 -0
- agentui/src/routes/+page.svelte +232 -0
- agentui/src/routes/login/+page.svelte +200 -0
- agentui/src/routes/talk/[agentId]/+page.svelte +297 -0
- agentui/src/routes/talk/[agentId]/+page.ts +7 -0
- agentui/static/README.md +1 -0
- agentui/svelte.config.js +11 -0
- agentui/tailwind.config.ts +53 -0
- agentui/tsconfig.json +3 -0
- agentui/vite.config.ts +10 -0
- ai_parrot-0.17.2.dist-info/METADATA +472 -0
- ai_parrot-0.17.2.dist-info/RECORD +535 -0
- ai_parrot-0.17.2.dist-info/WHEEL +6 -0
- ai_parrot-0.17.2.dist-info/entry_points.txt +2 -0
- ai_parrot-0.17.2.dist-info/licenses/LICENSE +21 -0
- ai_parrot-0.17.2.dist-info/top_level.txt +6 -0
- crew-builder/.prettierrc +15 -0
- crew-builder/QUICKSTART.md +259 -0
- crew-builder/README.md +113 -0
- crew-builder/env.example +17 -0
- crew-builder/jsconfig.json +14 -0
- crew-builder/package-lock.json +4182 -0
- crew-builder/package.json +37 -0
- crew-builder/scripts/postinstall/apply-patches.mjs +260 -0
- crew-builder/src/app.css +62 -0
- crew-builder/src/app.d.ts +13 -0
- crew-builder/src/app.html +12 -0
- crew-builder/src/components/LoadingSpinner.svelte +64 -0
- crew-builder/src/components/ThemeSwitcher.svelte +149 -0
- crew-builder/src/components/index.js +9 -0
- crew-builder/src/lib/api/bots.ts +60 -0
- crew-builder/src/lib/api/chat.ts +80 -0
- crew-builder/src/lib/api/client.ts +56 -0
- crew-builder/src/lib/api/crew/crew.ts +136 -0
- crew-builder/src/lib/api/index.ts +5 -0
- crew-builder/src/lib/api/o365/auth.ts +65 -0
- crew-builder/src/lib/auth/auth.ts +54 -0
- crew-builder/src/lib/components/AgentNode.svelte +43 -0
- crew-builder/src/lib/components/BotCard.svelte +33 -0
- crew-builder/src/lib/components/ChatBubble.svelte +67 -0
- crew-builder/src/lib/components/ConfigPanel.svelte +278 -0
- crew-builder/src/lib/components/JsonTreeNode.svelte +76 -0
- crew-builder/src/lib/components/JsonViewer.svelte +24 -0
- crew-builder/src/lib/components/MarkdownEditor.svelte +48 -0
- crew-builder/src/lib/components/ThemeToggle.svelte +36 -0
- crew-builder/src/lib/components/Toast.svelte +67 -0
- crew-builder/src/lib/components/Toolbar.svelte +157 -0
- crew-builder/src/lib/components/index.ts +10 -0
- crew-builder/src/lib/config.ts +8 -0
- crew-builder/src/lib/stores/auth.svelte.ts +228 -0
- crew-builder/src/lib/stores/crewStore.ts +369 -0
- crew-builder/src/lib/stores/theme.svelte.js +145 -0
- crew-builder/src/lib/stores/toast.svelte.ts +69 -0
- crew-builder/src/lib/utils/conversation.ts +39 -0
- crew-builder/src/lib/utils/markdown.ts +122 -0
- crew-builder/src/lib/utils/talkHistory.ts +47 -0
- crew-builder/src/routes/+layout.svelte +20 -0
- crew-builder/src/routes/+page.svelte +539 -0
- crew-builder/src/routes/agents/+page.svelte +247 -0
- crew-builder/src/routes/agents/[agentId]/+page.svelte +288 -0
- crew-builder/src/routes/agents/[agentId]/+page.ts +7 -0
- crew-builder/src/routes/builder/+page.svelte +204 -0
- crew-builder/src/routes/crew/ask/+page.svelte +1052 -0
- crew-builder/src/routes/crew/ask/+page.ts +1 -0
- crew-builder/src/routes/integrations/o365/+page.svelte +304 -0
- crew-builder/src/routes/login/+page.svelte +197 -0
- crew-builder/src/routes/talk/[agentId]/+page.svelte +487 -0
- crew-builder/src/routes/talk/[agentId]/+page.ts +7 -0
- crew-builder/static/README.md +1 -0
- crew-builder/svelte.config.js +11 -0
- crew-builder/tailwind.config.ts +53 -0
- crew-builder/tsconfig.json +3 -0
- crew-builder/vite.config.ts +10 -0
- mcp_servers/calculator_server.py +309 -0
- parrot/__init__.py +27 -0
- parrot/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/__pycache__/version.cpython-310.pyc +0 -0
- parrot/_version.py +34 -0
- parrot/a2a/__init__.py +48 -0
- parrot/a2a/client.py +658 -0
- parrot/a2a/discovery.py +89 -0
- parrot/a2a/mixin.py +257 -0
- parrot/a2a/models.py +376 -0
- parrot/a2a/server.py +770 -0
- parrot/agents/__init__.py +29 -0
- parrot/bots/__init__.py +12 -0
- parrot/bots/a2a_agent.py +19 -0
- parrot/bots/abstract.py +3139 -0
- parrot/bots/agent.py +1129 -0
- parrot/bots/basic.py +9 -0
- parrot/bots/chatbot.py +669 -0
- parrot/bots/data.py +1618 -0
- parrot/bots/database/__init__.py +5 -0
- parrot/bots/database/abstract.py +3071 -0
- parrot/bots/database/cache.py +286 -0
- parrot/bots/database/models.py +468 -0
- parrot/bots/database/prompts.py +154 -0
- parrot/bots/database/retries.py +98 -0
- parrot/bots/database/router.py +269 -0
- parrot/bots/database/sql.py +41 -0
- parrot/bots/db/__init__.py +6 -0
- parrot/bots/db/abstract.py +556 -0
- parrot/bots/db/bigquery.py +602 -0
- parrot/bots/db/cache.py +85 -0
- parrot/bots/db/documentdb.py +668 -0
- parrot/bots/db/elastic.py +1014 -0
- parrot/bots/db/influx.py +898 -0
- parrot/bots/db/mock.py +96 -0
- parrot/bots/db/multi.py +783 -0
- parrot/bots/db/prompts.py +185 -0
- parrot/bots/db/sql.py +1255 -0
- parrot/bots/db/tools.py +212 -0
- parrot/bots/document.py +680 -0
- parrot/bots/hrbot.py +15 -0
- parrot/bots/kb.py +170 -0
- parrot/bots/mcp.py +36 -0
- parrot/bots/orchestration/README.md +463 -0
- parrot/bots/orchestration/__init__.py +1 -0
- parrot/bots/orchestration/agent.py +155 -0
- parrot/bots/orchestration/crew.py +3330 -0
- parrot/bots/orchestration/fsm.py +1179 -0
- parrot/bots/orchestration/hr.py +434 -0
- parrot/bots/orchestration/storage/__init__.py +4 -0
- parrot/bots/orchestration/storage/memory.py +100 -0
- parrot/bots/orchestration/storage/mixin.py +119 -0
- parrot/bots/orchestration/verify.py +202 -0
- parrot/bots/product.py +204 -0
- parrot/bots/prompts/__init__.py +96 -0
- parrot/bots/prompts/agents.py +155 -0
- parrot/bots/prompts/data.py +216 -0
- parrot/bots/prompts/output_generation.py +8 -0
- parrot/bots/scraper/__init__.py +3 -0
- parrot/bots/scraper/models.py +122 -0
- parrot/bots/scraper/scraper.py +1173 -0
- parrot/bots/scraper/templates.py +115 -0
- parrot/bots/stores/__init__.py +5 -0
- parrot/bots/stores/local.py +172 -0
- parrot/bots/webdev.py +81 -0
- parrot/cli.py +17 -0
- parrot/clients/__init__.py +16 -0
- parrot/clients/base.py +1491 -0
- parrot/clients/claude.py +1191 -0
- parrot/clients/factory.py +129 -0
- parrot/clients/google.py +4567 -0
- parrot/clients/gpt.py +1975 -0
- parrot/clients/grok.py +432 -0
- parrot/clients/groq.py +986 -0
- parrot/clients/hf.py +582 -0
- parrot/clients/models.py +18 -0
- parrot/conf.py +395 -0
- parrot/embeddings/__init__.py +9 -0
- parrot/embeddings/base.py +157 -0
- parrot/embeddings/google.py +98 -0
- parrot/embeddings/huggingface.py +74 -0
- parrot/embeddings/openai.py +84 -0
- parrot/embeddings/processor.py +88 -0
- parrot/exceptions.c +13868 -0
- parrot/exceptions.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/exceptions.pxd +22 -0
- parrot/exceptions.pxi +15 -0
- parrot/exceptions.pyx +44 -0
- parrot/generators/__init__.py +29 -0
- parrot/generators/base.py +200 -0
- parrot/generators/html.py +293 -0
- parrot/generators/react.py +205 -0
- parrot/generators/streamlit.py +203 -0
- parrot/generators/template.py +105 -0
- parrot/handlers/__init__.py +4 -0
- parrot/handlers/agent.py +861 -0
- parrot/handlers/agents/__init__.py +1 -0
- parrot/handlers/agents/abstract.py +900 -0
- parrot/handlers/bots.py +338 -0
- parrot/handlers/chat.py +915 -0
- parrot/handlers/creation.sql +192 -0
- parrot/handlers/crew/ARCHITECTURE.md +362 -0
- parrot/handlers/crew/README_BOTMANAGER_PERSISTENCE.md +303 -0
- parrot/handlers/crew/README_REDIS_PERSISTENCE.md +366 -0
- parrot/handlers/crew/__init__.py +0 -0
- parrot/handlers/crew/handler.py +801 -0
- parrot/handlers/crew/models.py +229 -0
- parrot/handlers/crew/redis_persistence.py +523 -0
- parrot/handlers/jobs/__init__.py +10 -0
- parrot/handlers/jobs/job.py +384 -0
- parrot/handlers/jobs/mixin.py +627 -0
- parrot/handlers/jobs/models.py +115 -0
- parrot/handlers/jobs/worker.py +31 -0
- parrot/handlers/models.py +596 -0
- parrot/handlers/o365_auth.py +105 -0
- parrot/handlers/stream.py +337 -0
- parrot/interfaces/__init__.py +6 -0
- parrot/interfaces/aws.py +143 -0
- parrot/interfaces/credentials.py +113 -0
- parrot/interfaces/database.py +27 -0
- parrot/interfaces/google.py +1123 -0
- parrot/interfaces/hierarchy.py +1227 -0
- parrot/interfaces/http.py +651 -0
- parrot/interfaces/images/__init__.py +0 -0
- parrot/interfaces/images/plugins/__init__.py +24 -0
- parrot/interfaces/images/plugins/abstract.py +58 -0
- parrot/interfaces/images/plugins/analisys.py +148 -0
- parrot/interfaces/images/plugins/classify.py +150 -0
- parrot/interfaces/images/plugins/classifybase.py +182 -0
- parrot/interfaces/images/plugins/detect.py +150 -0
- parrot/interfaces/images/plugins/exif.py +1103 -0
- parrot/interfaces/images/plugins/hash.py +52 -0
- parrot/interfaces/images/plugins/vision.py +104 -0
- parrot/interfaces/images/plugins/yolo.py +66 -0
- parrot/interfaces/images/plugins/zerodetect.py +197 -0
- parrot/interfaces/o365.py +978 -0
- parrot/interfaces/onedrive.py +822 -0
- parrot/interfaces/sharepoint.py +1435 -0
- parrot/interfaces/soap.py +257 -0
- parrot/loaders/__init__.py +8 -0
- parrot/loaders/abstract.py +1131 -0
- parrot/loaders/audio.py +199 -0
- parrot/loaders/basepdf.py +53 -0
- parrot/loaders/basevideo.py +1568 -0
- parrot/loaders/csv.py +409 -0
- parrot/loaders/docx.py +116 -0
- parrot/loaders/epubloader.py +316 -0
- parrot/loaders/excel.py +199 -0
- parrot/loaders/factory.py +55 -0
- parrot/loaders/files/__init__.py +0 -0
- parrot/loaders/files/abstract.py +39 -0
- parrot/loaders/files/html.py +26 -0
- parrot/loaders/files/text.py +63 -0
- parrot/loaders/html.py +152 -0
- parrot/loaders/markdown.py +442 -0
- parrot/loaders/pdf.py +373 -0
- parrot/loaders/pdfmark.py +320 -0
- parrot/loaders/pdftables.py +506 -0
- parrot/loaders/ppt.py +476 -0
- parrot/loaders/qa.py +63 -0
- parrot/loaders/splitters/__init__.py +10 -0
- parrot/loaders/splitters/base.py +138 -0
- parrot/loaders/splitters/md.py +228 -0
- parrot/loaders/splitters/token.py +143 -0
- parrot/loaders/txt.py +26 -0
- parrot/loaders/video.py +89 -0
- parrot/loaders/videolocal.py +218 -0
- parrot/loaders/videounderstanding.py +377 -0
- parrot/loaders/vimeo.py +167 -0
- parrot/loaders/web.py +599 -0
- parrot/loaders/youtube.py +504 -0
- parrot/manager/__init__.py +5 -0
- parrot/manager/manager.py +1030 -0
- parrot/mcp/__init__.py +28 -0
- parrot/mcp/adapter.py +105 -0
- parrot/mcp/cli.py +174 -0
- parrot/mcp/client.py +119 -0
- parrot/mcp/config.py +75 -0
- parrot/mcp/integration.py +842 -0
- parrot/mcp/oauth.py +933 -0
- parrot/mcp/server.py +225 -0
- parrot/mcp/transports/__init__.py +3 -0
- parrot/mcp/transports/base.py +279 -0
- parrot/mcp/transports/grpc_session.py +163 -0
- parrot/mcp/transports/http.py +312 -0
- parrot/mcp/transports/mcp.proto +108 -0
- parrot/mcp/transports/quic.py +1082 -0
- parrot/mcp/transports/sse.py +330 -0
- parrot/mcp/transports/stdio.py +309 -0
- parrot/mcp/transports/unix.py +395 -0
- parrot/mcp/transports/websocket.py +547 -0
- parrot/memory/__init__.py +16 -0
- parrot/memory/abstract.py +209 -0
- parrot/memory/agent.py +32 -0
- parrot/memory/cache.py +175 -0
- parrot/memory/core.py +555 -0
- parrot/memory/file.py +153 -0
- parrot/memory/mem.py +131 -0
- parrot/memory/redis.py +613 -0
- parrot/models/__init__.py +46 -0
- parrot/models/basic.py +118 -0
- parrot/models/compliance.py +208 -0
- parrot/models/crew.py +395 -0
- parrot/models/detections.py +654 -0
- parrot/models/generation.py +85 -0
- parrot/models/google.py +223 -0
- parrot/models/groq.py +23 -0
- parrot/models/openai.py +30 -0
- parrot/models/outputs.py +285 -0
- parrot/models/responses.py +938 -0
- parrot/notifications/__init__.py +743 -0
- parrot/openapi/__init__.py +3 -0
- parrot/openapi/components.yaml +641 -0
- parrot/openapi/config.py +322 -0
- parrot/outputs/__init__.py +32 -0
- parrot/outputs/formats/__init__.py +108 -0
- parrot/outputs/formats/altair.py +359 -0
- parrot/outputs/formats/application.py +122 -0
- parrot/outputs/formats/base.py +351 -0
- parrot/outputs/formats/bokeh.py +356 -0
- parrot/outputs/formats/card.py +424 -0
- parrot/outputs/formats/chart.py +436 -0
- parrot/outputs/formats/d3.py +255 -0
- parrot/outputs/formats/echarts.py +310 -0
- parrot/outputs/formats/generators/__init__.py +0 -0
- parrot/outputs/formats/generators/abstract.py +61 -0
- parrot/outputs/formats/generators/panel.py +145 -0
- parrot/outputs/formats/generators/streamlit.py +86 -0
- parrot/outputs/formats/generators/terminal.py +63 -0
- parrot/outputs/formats/holoviews.py +310 -0
- parrot/outputs/formats/html.py +147 -0
- parrot/outputs/formats/jinja2.py +46 -0
- parrot/outputs/formats/json.py +87 -0
- parrot/outputs/formats/map.py +933 -0
- parrot/outputs/formats/markdown.py +172 -0
- parrot/outputs/formats/matplotlib.py +237 -0
- parrot/outputs/formats/mixins/__init__.py +0 -0
- parrot/outputs/formats/mixins/emaps.py +855 -0
- parrot/outputs/formats/plotly.py +341 -0
- parrot/outputs/formats/seaborn.py +310 -0
- parrot/outputs/formats/table.py +397 -0
- parrot/outputs/formats/template_report.py +138 -0
- parrot/outputs/formats/yaml.py +125 -0
- parrot/outputs/formatter.py +152 -0
- parrot/outputs/templates/__init__.py +95 -0
- parrot/pipelines/__init__.py +0 -0
- parrot/pipelines/abstract.py +210 -0
- parrot/pipelines/detector.py +124 -0
- parrot/pipelines/models.py +90 -0
- parrot/pipelines/planogram.py +3002 -0
- parrot/pipelines/table.sql +97 -0
- parrot/plugins/__init__.py +106 -0
- parrot/plugins/importer.py +80 -0
- parrot/py.typed +0 -0
- parrot/registry/__init__.py +18 -0
- parrot/registry/registry.py +594 -0
- parrot/scheduler/__init__.py +1189 -0
- parrot/scheduler/models.py +60 -0
- parrot/security/__init__.py +16 -0
- parrot/security/prompt_injection.py +268 -0
- parrot/security/security_events.sql +25 -0
- parrot/services/__init__.py +1 -0
- parrot/services/mcp/__init__.py +8 -0
- parrot/services/mcp/config.py +13 -0
- parrot/services/mcp/server.py +295 -0
- parrot/services/o365_remote_auth.py +235 -0
- parrot/stores/__init__.py +7 -0
- parrot/stores/abstract.py +352 -0
- parrot/stores/arango.py +1090 -0
- parrot/stores/bigquery.py +1377 -0
- parrot/stores/cache.py +106 -0
- parrot/stores/empty.py +10 -0
- parrot/stores/faiss_store.py +1157 -0
- parrot/stores/kb/__init__.py +9 -0
- parrot/stores/kb/abstract.py +68 -0
- parrot/stores/kb/cache.py +165 -0
- parrot/stores/kb/doc.py +325 -0
- parrot/stores/kb/hierarchy.py +346 -0
- parrot/stores/kb/local.py +457 -0
- parrot/stores/kb/prompt.py +28 -0
- parrot/stores/kb/redis.py +659 -0
- parrot/stores/kb/store.py +115 -0
- parrot/stores/kb/user.py +374 -0
- parrot/stores/models.py +59 -0
- parrot/stores/pgvector.py +3 -0
- parrot/stores/postgres.py +2853 -0
- parrot/stores/utils/__init__.py +0 -0
- parrot/stores/utils/chunking.py +197 -0
- parrot/telemetry/__init__.py +3 -0
- parrot/telemetry/mixin.py +111 -0
- parrot/template/__init__.py +3 -0
- parrot/template/engine.py +259 -0
- parrot/tools/__init__.py +23 -0
- parrot/tools/abstract.py +644 -0
- parrot/tools/agent.py +363 -0
- parrot/tools/arangodbsearch.py +537 -0
- parrot/tools/arxiv_tool.py +188 -0
- parrot/tools/calculator/__init__.py +3 -0
- parrot/tools/calculator/operations/__init__.py +38 -0
- parrot/tools/calculator/operations/calculus.py +80 -0
- parrot/tools/calculator/operations/statistics.py +76 -0
- parrot/tools/calculator/tool.py +150 -0
- parrot/tools/cloudwatch.py +988 -0
- parrot/tools/codeinterpreter/__init__.py +127 -0
- parrot/tools/codeinterpreter/executor.py +371 -0
- parrot/tools/codeinterpreter/internals.py +473 -0
- parrot/tools/codeinterpreter/models.py +643 -0
- parrot/tools/codeinterpreter/prompts.py +224 -0
- parrot/tools/codeinterpreter/tool.py +664 -0
- parrot/tools/company_info/__init__.py +6 -0
- parrot/tools/company_info/tool.py +1138 -0
- parrot/tools/correlationanalysis.py +437 -0
- parrot/tools/database/abstract.py +286 -0
- parrot/tools/database/bq.py +115 -0
- parrot/tools/database/cache.py +284 -0
- parrot/tools/database/models.py +95 -0
- parrot/tools/database/pg.py +343 -0
- parrot/tools/databasequery.py +1159 -0
- parrot/tools/db.py +1800 -0
- parrot/tools/ddgo.py +370 -0
- parrot/tools/decorators.py +271 -0
- parrot/tools/dftohtml.py +282 -0
- parrot/tools/document.py +549 -0
- parrot/tools/ecs.py +819 -0
- parrot/tools/edareport.py +368 -0
- parrot/tools/elasticsearch.py +1049 -0
- parrot/tools/employees.py +462 -0
- parrot/tools/epson/__init__.py +96 -0
- parrot/tools/excel.py +683 -0
- parrot/tools/file/__init__.py +13 -0
- parrot/tools/file/abstract.py +76 -0
- parrot/tools/file/gcs.py +378 -0
- parrot/tools/file/local.py +284 -0
- parrot/tools/file/s3.py +511 -0
- parrot/tools/file/tmp.py +309 -0
- parrot/tools/file/tool.py +501 -0
- parrot/tools/file_reader.py +129 -0
- parrot/tools/flowtask/__init__.py +19 -0
- parrot/tools/flowtask/tool.py +761 -0
- parrot/tools/gittoolkit.py +508 -0
- parrot/tools/google/__init__.py +18 -0
- parrot/tools/google/base.py +169 -0
- parrot/tools/google/tools.py +1251 -0
- parrot/tools/googlelocation.py +5 -0
- parrot/tools/googleroutes.py +5 -0
- parrot/tools/googlesearch.py +5 -0
- parrot/tools/googlesitesearch.py +5 -0
- parrot/tools/googlevoice.py +2 -0
- parrot/tools/gvoice.py +695 -0
- parrot/tools/ibisworld/README.md +225 -0
- parrot/tools/ibisworld/__init__.py +11 -0
- parrot/tools/ibisworld/tool.py +366 -0
- parrot/tools/jiratoolkit.py +1718 -0
- parrot/tools/manager.py +1098 -0
- parrot/tools/math.py +152 -0
- parrot/tools/metadata.py +476 -0
- parrot/tools/msteams.py +1621 -0
- parrot/tools/msword.py +635 -0
- parrot/tools/multidb.py +580 -0
- parrot/tools/multistoresearch.py +369 -0
- parrot/tools/networkninja.py +167 -0
- parrot/tools/nextstop/__init__.py +4 -0
- parrot/tools/nextstop/base.py +286 -0
- parrot/tools/nextstop/employee.py +733 -0
- parrot/tools/nextstop/store.py +462 -0
- parrot/tools/notification.py +435 -0
- parrot/tools/o365/__init__.py +42 -0
- parrot/tools/o365/base.py +295 -0
- parrot/tools/o365/bundle.py +522 -0
- parrot/tools/o365/events.py +554 -0
- parrot/tools/o365/mail.py +992 -0
- parrot/tools/o365/onedrive.py +497 -0
- parrot/tools/o365/sharepoint.py +641 -0
- parrot/tools/openapi_toolkit.py +904 -0
- parrot/tools/openweather.py +527 -0
- parrot/tools/pdfprint.py +1001 -0
- parrot/tools/powerbi.py +518 -0
- parrot/tools/powerpoint.py +1113 -0
- parrot/tools/pricestool.py +146 -0
- parrot/tools/products/__init__.py +246 -0
- parrot/tools/prophet_tool.py +171 -0
- parrot/tools/pythonpandas.py +630 -0
- parrot/tools/pythonrepl.py +910 -0
- parrot/tools/qsource.py +436 -0
- parrot/tools/querytoolkit.py +395 -0
- parrot/tools/quickeda.py +827 -0
- parrot/tools/resttool.py +553 -0
- parrot/tools/retail/__init__.py +0 -0
- parrot/tools/retail/bby.py +528 -0
- parrot/tools/sandboxtool.py +703 -0
- parrot/tools/sassie/__init__.py +352 -0
- parrot/tools/scraping/__init__.py +7 -0
- parrot/tools/scraping/docs/select.md +466 -0
- parrot/tools/scraping/documentation.md +1278 -0
- parrot/tools/scraping/driver.py +436 -0
- parrot/tools/scraping/models.py +576 -0
- parrot/tools/scraping/options.py +85 -0
- parrot/tools/scraping/orchestrator.py +517 -0
- parrot/tools/scraping/readme.md +740 -0
- parrot/tools/scraping/tool.py +3115 -0
- parrot/tools/seasonaldetection.py +642 -0
- parrot/tools/shell_tool/__init__.py +5 -0
- parrot/tools/shell_tool/actions.py +408 -0
- parrot/tools/shell_tool/engine.py +155 -0
- parrot/tools/shell_tool/models.py +322 -0
- parrot/tools/shell_tool/tool.py +442 -0
- parrot/tools/site_search.py +214 -0
- parrot/tools/textfile.py +418 -0
- parrot/tools/think.py +378 -0
- parrot/tools/toolkit.py +298 -0
- parrot/tools/webapp_tool.py +187 -0
- parrot/tools/whatif.py +1279 -0
- parrot/tools/workday/MULTI_WSDL_EXAMPLE.md +249 -0
- parrot/tools/workday/__init__.py +6 -0
- parrot/tools/workday/models.py +1389 -0
- parrot/tools/workday/tool.py +1293 -0
- parrot/tools/yfinance_tool.py +306 -0
- parrot/tools/zipcode.py +217 -0
- parrot/utils/__init__.py +2 -0
- parrot/utils/helpers.py +73 -0
- parrot/utils/parsers/__init__.py +5 -0
- parrot/utils/parsers/toml.c +12078 -0
- parrot/utils/parsers/toml.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/parsers/toml.pyx +21 -0
- parrot/utils/toml.py +11 -0
- parrot/utils/types.cpp +20936 -0
- parrot/utils/types.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/types.pyx +213 -0
- parrot/utils/uv.py +11 -0
- parrot/version.py +10 -0
- parrot/yaml-rs/Cargo.lock +350 -0
- parrot/yaml-rs/Cargo.toml +19 -0
- parrot/yaml-rs/pyproject.toml +19 -0
- parrot/yaml-rs/python/yaml_rs/__init__.py +81 -0
- parrot/yaml-rs/src/lib.rs +222 -0
- requirements/docker-compose.yml +24 -0
- requirements/requirements-dev.txt +21 -0
parrot/bots/db/influx.py
ADDED
|
@@ -0,0 +1,898 @@
|
|
|
1
|
+
"""
|
|
2
|
+
InfluxDB Agent Implementation for AI-Parrot.
|
|
3
|
+
|
|
4
|
+
Concrete implementation of AbstractDbAgent for InfluxDB
|
|
5
|
+
with support for Flux query language and time-series data analysis.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Dict, Any, List, Optional, Union
|
|
9
|
+
import asyncio
|
|
10
|
+
import re
|
|
11
|
+
from urllib.parse import urlparse
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from pydantic import Field
|
|
14
|
+
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
|
|
15
|
+
from influxdb_client import Point
|
|
16
|
+
from influxdb_client.client.write_api import SYNCHRONOUS
|
|
17
|
+
|
|
18
|
+
from .abstract import (
|
|
19
|
+
AbstractDBAgent,
|
|
20
|
+
DatabaseSchema,
|
|
21
|
+
TableMetadata,
|
|
22
|
+
QueryGenerationArgs
|
|
23
|
+
)
|
|
24
|
+
from ...tools.abstract import AbstractTool, ToolResult, AbstractToolArgsSchema
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class FluxQueryExecutionArgs(AbstractToolArgsSchema):
|
|
28
|
+
"""Arguments for Flux query execution."""
|
|
29
|
+
query: str = Field(description="Flux query to execute")
|
|
30
|
+
limit: Optional[int] = Field(
|
|
31
|
+
default=1000, description="Maximum number of records to return")
|
|
32
|
+
timeout: int = Field(default=30, description="Query timeout in seconds")
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class InfluxMeasurementMetadata:
|
|
36
|
+
"""Metadata for InfluxDB measurements (equivalent to tables)."""
|
|
37
|
+
def __init__(
|
|
38
|
+
self,
|
|
39
|
+
name: str,
|
|
40
|
+
bucket: str,
|
|
41
|
+
tags: List[str],
|
|
42
|
+
fields: List[Dict[str, str]],
|
|
43
|
+
time_range: Dict[str, Any],
|
|
44
|
+
sample_records: List[Dict[str, Any]] = None
|
|
45
|
+
):
|
|
46
|
+
self.name = name
|
|
47
|
+
self.bucket = bucket
|
|
48
|
+
self.tags = tags
|
|
49
|
+
self.fields = fields
|
|
50
|
+
self.time_range = time_range
|
|
51
|
+
self.sample_records = sample_records or []
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class InfluxDBAgent(AbstractDBAgent):
|
|
55
|
+
"""
|
|
56
|
+
InfluxDB Agent for time-series database introspection and Flux query generation.
|
|
57
|
+
|
|
58
|
+
Supports InfluxDB 2.x with Flux query language.
|
|
59
|
+
"""
|
|
60
|
+
|
|
61
|
+
def __init__(
|
|
62
|
+
self,
|
|
63
|
+
name: str = "InfluxDBAgent",
|
|
64
|
+
connection_string: str = None,
|
|
65
|
+
token: str = None,
|
|
66
|
+
org: str = None,
|
|
67
|
+
bucket: str = None,
|
|
68
|
+
max_sample_records: int = 10,
|
|
69
|
+
default_time_range: str = "-30d",
|
|
70
|
+
**kwargs
|
|
71
|
+
):
|
|
72
|
+
"""
|
|
73
|
+
Initialize InfluxDB Agent.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
name: Agent name
|
|
77
|
+
connection_string: InfluxDB URL (e.g., 'http://localhost:8086')
|
|
78
|
+
token: InfluxDB authentication token
|
|
79
|
+
org: InfluxDB organization
|
|
80
|
+
bucket: Default bucket name (can be overridden)
|
|
81
|
+
max_sample_records: Maximum sample records per measurement
|
|
82
|
+
default_time_range: Default time range for queries (e.g., '-30d', '-1h')
|
|
83
|
+
"""
|
|
84
|
+
self.token = token
|
|
85
|
+
self.org = org
|
|
86
|
+
self.bucket = bucket
|
|
87
|
+
self.max_sample_records = max_sample_records
|
|
88
|
+
self.default_time_range = default_time_range
|
|
89
|
+
self.client: Optional[InfluxDBClientAsync] = None
|
|
90
|
+
self.measurements_cache: Dict[str, InfluxMeasurementMetadata] = {}
|
|
91
|
+
|
|
92
|
+
super().__init__(
|
|
93
|
+
name=name,
|
|
94
|
+
connection_string=connection_string,
|
|
95
|
+
schema_name=bucket, # Use bucket as schema equivalent
|
|
96
|
+
**kwargs
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
# Add InfluxDB-specific tools
|
|
100
|
+
self._setup_influx_tools()
|
|
101
|
+
|
|
102
|
+
def _setup_influx_tools(self):
|
|
103
|
+
"""Setup InfluxDB-specific tools."""
|
|
104
|
+
# Add Flux query execution tool
|
|
105
|
+
flux_execution_tool = FluxQueryExecutionTool(agent=self)
|
|
106
|
+
self.tool_manager.register_tool(flux_execution_tool)
|
|
107
|
+
|
|
108
|
+
# Add measurement exploration tool
|
|
109
|
+
measurement_tool = MeasurementExplorationTool(agent=self)
|
|
110
|
+
self.tool_manager.register_tool(measurement_tool)
|
|
111
|
+
|
|
112
|
+
async def connect_database(self) -> None:
|
|
113
|
+
"""Connect to InfluxDB using async client."""
|
|
114
|
+
if not self.connection_string:
|
|
115
|
+
raise ValueError("InfluxDB URL is required")
|
|
116
|
+
if not self.token:
|
|
117
|
+
raise ValueError("InfluxDB token is required")
|
|
118
|
+
if not self.org:
|
|
119
|
+
raise ValueError("InfluxDB organization is required")
|
|
120
|
+
|
|
121
|
+
try:
|
|
122
|
+
self.client = InfluxDBClientAsync(
|
|
123
|
+
url=self.connection_string,
|
|
124
|
+
token=self.token,
|
|
125
|
+
org=self.org,
|
|
126
|
+
timeout=30000 # 30 seconds timeout
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
# Test connection by getting org info
|
|
130
|
+
orgs_api = self.client.organizations_api()
|
|
131
|
+
orgs = await orgs_api.find_organizations()
|
|
132
|
+
|
|
133
|
+
if not any(org.name == self.org for org in orgs):
|
|
134
|
+
raise ValueError(f"Organization '{self.org}' not found")
|
|
135
|
+
|
|
136
|
+
self.logger.info(f"Successfully connected to InfluxDB at {self.connection_string}")
|
|
137
|
+
|
|
138
|
+
except Exception as e:
|
|
139
|
+
self.logger.error(f"Failed to connect to InfluxDB: {e}")
|
|
140
|
+
raise
|
|
141
|
+
|
|
142
|
+
async def extract_schema_metadata(self) -> DatabaseSchema:
|
|
143
|
+
"""Extract schema metadata from InfluxDB (buckets, measurements, fields, tags)."""
|
|
144
|
+
if not self.client:
|
|
145
|
+
await self.connect_database()
|
|
146
|
+
|
|
147
|
+
try:
|
|
148
|
+
# Get all buckets if no specific bucket is set
|
|
149
|
+
buckets_to_analyze = []
|
|
150
|
+
if self.bucket:
|
|
151
|
+
buckets_to_analyze = [self.bucket]
|
|
152
|
+
else:
|
|
153
|
+
buckets_api = self.client.buckets_api()
|
|
154
|
+
buckets = await buckets_api.find_buckets()
|
|
155
|
+
buckets_to_analyze = [bucket.name for bucket in buckets if not bucket.name.startswith('_')]
|
|
156
|
+
|
|
157
|
+
# Extract measurements from each bucket
|
|
158
|
+
all_measurements = []
|
|
159
|
+
for bucket_name in buckets_to_analyze:
|
|
160
|
+
measurements = await self._extract_measurements_from_bucket(bucket_name)
|
|
161
|
+
all_measurements.extend(measurements)
|
|
162
|
+
|
|
163
|
+
# Convert measurements to TableMetadata format
|
|
164
|
+
tables = self._convert_measurements_to_tables(all_measurements)
|
|
165
|
+
|
|
166
|
+
schema_metadata = DatabaseSchema(
|
|
167
|
+
database_name=self.org,
|
|
168
|
+
database_type="influxdb",
|
|
169
|
+
tables=tables,
|
|
170
|
+
views=[], # InfluxDB doesn't have views
|
|
171
|
+
functions=[], # InfluxDB doesn't have stored functions
|
|
172
|
+
procedures=[], # InfluxDB doesn't have stored procedures
|
|
173
|
+
metadata={
|
|
174
|
+
"buckets_analyzed": buckets_to_analyze,
|
|
175
|
+
"total_measurements": len(all_measurements),
|
|
176
|
+
"extraction_timestamp": datetime.now().isoformat(),
|
|
177
|
+
"time_range_analyzed": self.default_time_range
|
|
178
|
+
}
|
|
179
|
+
)
|
|
180
|
+
|
|
181
|
+
self.logger.info(
|
|
182
|
+
f"Extracted metadata for {len(all_measurements)} measurements from {len(buckets_to_analyze)} buckets"
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
return schema_metadata
|
|
186
|
+
|
|
187
|
+
except Exception as e:
|
|
188
|
+
self.logger.error(f"Failed to extract InfluxDB schema metadata: {e}")
|
|
189
|
+
raise
|
|
190
|
+
|
|
191
|
+
async def _extract_measurements_from_bucket(self, bucket_name: str) -> List[InfluxMeasurementMetadata]:
|
|
192
|
+
"""Extract all measurements from a specific bucket."""
|
|
193
|
+
query_api = self.client.query_api()
|
|
194
|
+
|
|
195
|
+
# Query to get all measurements in the bucket
|
|
196
|
+
measurements_query = f'''
|
|
197
|
+
import "influxdata/influxdb/schema"
|
|
198
|
+
schema.measurements(bucket: "{bucket_name}")
|
|
199
|
+
'''
|
|
200
|
+
|
|
201
|
+
try:
|
|
202
|
+
result = await query_api.query(measurements_query)
|
|
203
|
+
measurements_data = []
|
|
204
|
+
|
|
205
|
+
for table in result:
|
|
206
|
+
for record in table.records:
|
|
207
|
+
measurement_name = record.get_value()
|
|
208
|
+
if measurement_name:
|
|
209
|
+
# Get detailed metadata for this measurement
|
|
210
|
+
measurement_metadata = await self._extract_measurement_metadata(
|
|
211
|
+
bucket_name, measurement_name
|
|
212
|
+
)
|
|
213
|
+
measurements_data.append(measurement_metadata)
|
|
214
|
+
|
|
215
|
+
# Cache for later use
|
|
216
|
+
cache_key = f"{bucket_name}.{measurement_name}"
|
|
217
|
+
self.measurements_cache[cache_key] = measurement_metadata
|
|
218
|
+
|
|
219
|
+
return measurements_data
|
|
220
|
+
|
|
221
|
+
except Exception as e:
|
|
222
|
+
self.logger.warning(f"Could not extract measurements from bucket {bucket_name}: {e}")
|
|
223
|
+
return []
|
|
224
|
+
|
|
225
|
+
async def _extract_measurement_metadata(
    self,
    bucket_name: str,
    measurement_name: str
) -> InfluxMeasurementMetadata:
    """Extract detailed metadata for a specific measurement.

    Queries the Flux ``influxdata/influxdb/schema`` package for tag keys
    and field keys, infers each field's type by sampling one value,
    and gathers the measurement's observed time range plus a handful of
    sample records.

    Args:
        bucket_name: Bucket containing the measurement.
        measurement_name: Measurement to describe.

    Returns:
        InfluxMeasurementMetadata populated from live queries. On any
        failure an instance with empty tags/fields/time_range/samples is
        returned instead of raising, so bulk extraction can continue.
    """
    query_api = self.client.query_api()

    try:
        # Get tag keys, bounded to the agent's default lookback window.
        tags_query = f'''
        import "influxdata/influxdb/schema"
        schema.tagKeys(
            bucket: "{bucket_name}",
            predicate: (r) => r._measurement == "{measurement_name}",
            start: {self.default_time_range}
        )
        '''

        tags_result = await query_api.query(tags_query)
        tags = []
        for table in tags_result:
            for record in table.records:
                tag_key = record.get_value()
                if tag_key:
                    tags.append(tag_key)

        # Get field keys and types
        fields_query = f'''
        import "influxdata/influxdb/schema"
        schema.fieldKeys(
            bucket: "{bucket_name}",
            predicate: (r) => r._measurement == "{measurement_name}",
            start: {self.default_time_range}
        )
        '''

        fields_result = await query_api.query(fields_query)
        fields = []
        for table in fields_result:
            for record in table.records:
                field_key = record.get_value()
                if field_key:
                    # Try to determine field type by sampling one point.
                    field_type = await self._determine_field_type(
                        bucket_name, measurement_name, field_key
                    )
                    fields.append({
                        "name": field_key,
                        "type": field_type
                    })

        # Get time range for this measurement
        time_range = await self._get_measurement_time_range(bucket_name, measurement_name)

        # Get sample records
        sample_records = await self._get_sample_records(bucket_name, measurement_name)

        return InfluxMeasurementMetadata(
            name=measurement_name,
            bucket=bucket_name,
            tags=tags,
            fields=fields,
            time_range=time_range,
            sample_records=sample_records
        )

    except Exception as e:
        # Best-effort: degrade to empty metadata rather than aborting the
        # whole schema-extraction pass.
        self.logger.warning(
            f"Could not extract metadata for measurement {measurement_name}: {e}"
        )
        return InfluxMeasurementMetadata(
            name=measurement_name,
            bucket=bucket_name,
            tags=[],
            fields=[],
            time_range={},
            sample_records=[]
        )
|
|
304
|
+
|
|
305
|
+
async def _determine_field_type(
    self,
    bucket_name: str,
    measurement_name: str,
    field_key: str
) -> str:
    """Determine the data type of a field by sampling a single point.

    Runs a one-record Flux query and maps the Python type of the
    returned value onto a coarse type name.

    Args:
        bucket_name: Bucket to sample from.
        measurement_name: Measurement the field belongs to.
        field_key: Field whose type should be inferred.

    Returns:
        One of "boolean", "integer", "float", "string", or "unknown"
        (also returned when no data exists or the query fails).
    """
    query_api = self.client.query_api()

    type_query = f'''
    from(bucket: "{bucket_name}")
        |> range(start: {self.default_time_range})
        |> filter(fn: (r) => r._measurement == "{measurement_name}")
        |> filter(fn: (r) => r._field == "{field_key}")
        |> limit(n: 1)
    '''

    try:
        result = await query_api.query(type_query)
        for table in result:
            for record in table.records:
                value = record.get_value()
                # bool must be checked before int: bool is a subclass of int.
                if isinstance(value, bool):
                    return "boolean"
                elif isinstance(value, int):
                    return "integer"
                elif isinstance(value, float):
                    return "float"
                elif isinstance(value, str):
                    return "string"
                else:
                    return "unknown"
    except Exception as e:
        # Was a bare `except: pass` — that also swallowed cancellation and
        # interpreter-exit signals and hid real query errors. Narrow the
        # catch and record the failure; the fallback result is unchanged.
        self.logger.warning(
            f"Could not sample field type for {measurement_name}.{field_key}: {e}"
        )

    return "unknown"
|
|
341
|
+
|
|
342
|
+
async def _get_measurement_time_range(
    self,
    bucket_name: str,
    measurement_name: str
) -> Dict[str, Any]:
    """Get the time range for a measurement.

    Issues a single Flux query that unions the first and last points of
    the measurement (within the agent's default lookback window), each
    tagged with a synthetic "stat" column so both extremes come back in
    one round trip.

    Args:
        bucket_name: Bucket containing the measurement.
        measurement_name: Measurement to inspect.

    Returns:
        Mapping such as {"earliest": iso_ts, "latest": iso_ts}; an empty
        dict when the query fails or returns no rows.
    """
    query_api = self.client.query_api()

    # Get earliest and latest timestamps
    range_query = f'''
    data = from(bucket: "{bucket_name}")
        |> range(start: {self.default_time_range})
        |> filter(fn: (r) => r._measurement == "{measurement_name}")

    earliest = data |> first() |> keep(columns: ["_time"]) |> set(key: "stat", value: "earliest")
    latest = data |> last() |> keep(columns: ["_time"]) |> set(key: "stat", value: "latest")

    union(tables: [earliest, latest])
    '''

    try:
        result = await query_api.query(range_query)
        time_range = {}

        for table in result:
            for record in table.records:
                # "stat" identifies which extreme this row is.
                stat = record.values.get("stat")
                time_value = record.get_time()
                if stat and time_value:
                    time_range[stat] = time_value.isoformat()

        return time_range

    except Exception as e:
        # Best-effort: a missing time range is not fatal to schema extraction.
        self.logger.warning(f"Could not get time range for {measurement_name}: {e}")
        return {}
|
|
378
|
+
|
|
379
|
+
async def _get_sample_records(
    self,
    bucket_name: str,
    measurement_name: str
) -> List[Dict[str, Any]]:
    """Get sample records from a measurement.

    Pulls up to ``self.max_sample_records`` points from the default
    lookback window and flattens each Flux record into a plain dict of
    the standard columns plus any tag values.

    Args:
        bucket_name: Bucket containing the measurement.
        measurement_name: Measurement to sample.

    Returns:
        List of record dicts; empty list on query failure.
    """
    query_api = self.client.query_api()

    sample_query = f'''
    from(bucket: "{bucket_name}")
        |> range(start: {self.default_time_range})
        |> filter(fn: (r) => r._measurement == "{measurement_name}")
        |> limit(n: {self.max_sample_records})
    '''

    try:
        result = await query_api.query(sample_query)
        sample_records = []

        for table in result:
            for record in table.records:
                # Standard InfluxDB columns, with the timestamp normalized
                # to ISO-8601 text.
                record_dict = {
                    "_time": record.get_time().isoformat() if record.get_time() else None,
                    "_measurement": record.get_measurement(),
                    "_field": record.get_field(),
                    "_value": record.get_value()
                }
                # Add tag values (non-internal, non-bookkeeping keys).
                for key, value in record.values.items():
                    if not key.startswith('_') and key not in ['result', 'table']:
                        record_dict[key] = value

                sample_records.append(record_dict)

        # Re-cap the flattened list in case the query returned more rows
        # than expected across multiple result tables.
        return sample_records[:self.max_sample_records]

    except Exception as e:
        self.logger.warning(f"Could not get sample records for {measurement_name}: {e}")
        return []
|
|
418
|
+
|
|
419
|
+
def _convert_measurements_to_tables(
    self,
    measurements: List[InfluxMeasurementMetadata]
) -> List[TableMetadata]:
    """Convert InfluxDB measurements to TableMetadata format.

    Each measurement becomes one table whose columns are the two
    standard InfluxDB columns (_time, _measurement), one typed column
    per field, and one string column per tag. The bucket name is mapped
    onto the table's schema attribute.
    """
    converted = []

    for m in measurements:
        # Columns always present on an InfluxDB measurement.
        columns = [
            {
                "name": "_time",
                "type": "timestamp",
                "nullable": False,
                "description": "Timestamp of the data point"
            },
            {
                "name": "_measurement",
                "type": "string",
                "nullable": False,
                "description": "Measurement name"
            },
        ]

        # One column per field, typed from sampling.
        columns.extend(
            {
                "name": field["name"],
                "type": field["type"],
                "nullable": True,
                "description": f"Field: {field['name']}"
            }
            for field in m.fields
        )

        # One string column per tag.
        columns.extend(
            {
                "name": tag,
                "type": "string",
                "nullable": True,
                "description": f"Tag: {tag}"
            }
            for tag in m.tags
        )

        converted.append(
            TableMetadata(
                name=m.name,
                schema=m.bucket,
                columns=columns,
                primary_keys=["_time"],  # Time is essentially the primary key
                foreign_keys=[],  # InfluxDB doesn't have foreign keys
                indexes=[],  # InfluxDB handles indexing automatically
                description=f"InfluxDB measurement in bucket '{m.bucket}'",
                sample_data=m.sample_records
            )
        )

    return converted
|
|
479
|
+
|
|
480
|
+
async def generate_query(
    self,
    natural_language_query: str,
    target_tables: Optional[List[str]] = None,
    query_type: str = "SELECT"
) -> Dict[str, Any]:
    """Generate Flux query from natural language.

    Pipeline: gather schema context -> build an LLM prompt -> generate ->
    extract the Flux text from the response -> run basic validation.

    Args:
        natural_language_query: User request in plain language.
        target_tables: Optional measurement names to narrow the schema
            context lookup.
        query_type: Present for interface parity with SQL-style agents;
            the Flux generation path does not branch on it here.

    Returns:
        Dict containing the generated query, the measurements it
        references, the validation outcome, and generation context.

    Raises:
        Exception: re-raised (after logging) when any pipeline step fails.
    """
    try:
        # Get schema context for the query
        schema_context = await self._get_schema_context_for_query(
            natural_language_query, target_tables
        )

        # Build Flux query generation prompt
        prompt = self._build_flux_query_prompt(
            natural_language_query=natural_language_query,
            schema_context=schema_context,
            default_time_range=self.default_time_range
        )

        # Generate query using LLM; low temperature keeps output focused.
        response = await self.llm_client.generate_response(
            prompt=prompt,
            model=self.model_name,
            temperature=0.1
        )

        # Extract Flux query from response (strips markdown code fences).
        flux_query = self._extract_flux_from_response(response.output)

        # Validate query syntax (basic validation)
        validation_result = await self._validate_flux_syntax(flux_query)

        result = {
            "query": flux_query,
            "query_type": "flux",
            "measurements_used": self._extract_measurements_from_query(flux_query),
            "schema_context_used": len(schema_context),
            "validation": validation_result,
            "natural_language_input": natural_language_query,
            "default_time_range": self.default_time_range
        }

        return result

    except Exception as e:
        self.logger.error(f"Failed to generate Flux query: {e}")
        raise
|
|
528
|
+
|
|
529
|
+
def _build_flux_query_prompt(
    self,
    natural_language_query: str,
    schema_context: List[Dict[str, Any]],
    default_time_range: str
) -> str:
    """Build prompt for Flux query generation.

    Args:
        natural_language_query: The user's request to translate.
        schema_context: Retrieved schema snippets; only the top 3 entries'
            "content" values are included to keep the prompt compact.
        default_time_range: Flux duration literal (e.g. "-1h") quoted in
            the guidelines section.

    Returns:
        The complete prompt string, ending with "Flux Query:" so the LLM
        continues with just the query text.
    """
    prompt = f"""
You are an expert InfluxDB Flux query developer.
Generate a Flux query based on the natural language request and the provided schema information.

Natural Language Request: {natural_language_query}

Available Measurements and Schema:
"""

    # Only the three highest-ranked context entries are included.
    for i, context in enumerate(schema_context[:3], 1):
        prompt += f"\n{i}. {context.get('content', '')}\n"

    prompt += f"""

InfluxDB Flux Query Guidelines:
1. Always start with from(bucket: "bucket_name")
2. Use range() to specify time range (default: {default_time_range})
3. Use filter() to specify measurements and field conditions
4. Use aggregation functions like mean(), sum(), count(), etc. for time-series analysis
5. Use group() to group by tags or time windows
6. Use window() for time-based aggregations
7. Use |> (pipe) operator to chain operations
8. Return only the Flux query without explanations or markdown formatting

Example Flux query structure:
```
from(bucket: "my_bucket")
    |> range(start: -1h)
    |> filter(fn: (r) => r._measurement == "measurement_name")
    |> filter(fn: (r) => r._field == "field_name")
    |> aggregateWindow(every: 5m, fn: mean)
```

Default time range: {default_time_range}

Flux Query:"""

    return prompt
|
|
574
|
+
|
|
575
|
+
def _extract_flux_from_response(self, response_text: str) -> str:
|
|
576
|
+
"""Extract Flux query from LLM response."""
|
|
577
|
+
# Remove markdown code blocks if present
|
|
578
|
+
if "```" in response_text:
|
|
579
|
+
lines = response_text.split('\n')
|
|
580
|
+
flux_lines = []
|
|
581
|
+
in_code_block = False
|
|
582
|
+
|
|
583
|
+
for line in lines:
|
|
584
|
+
if line.strip().startswith("```"):
|
|
585
|
+
in_code_block = not in_code_block
|
|
586
|
+
continue
|
|
587
|
+
elif in_code_block:
|
|
588
|
+
flux_lines.append(line)
|
|
589
|
+
|
|
590
|
+
return '\n'.join(flux_lines).strip()
|
|
591
|
+
else:
|
|
592
|
+
return response_text.strip()
|
|
593
|
+
|
|
594
|
+
def _extract_measurements_from_query(self, query: str) -> List[str]:
|
|
595
|
+
"""Extract measurement names from Flux query."""
|
|
596
|
+
# Find measurement names in filter expressions
|
|
597
|
+
pattern = r'r\._measurement\s*==\s*["\']([^"\']+)["\']'
|
|
598
|
+
matches = re.findall(pattern, query)
|
|
599
|
+
|
|
600
|
+
return list(set(matches))
|
|
601
|
+
|
|
602
|
+
async def _validate_flux_syntax(self, query: str) -> Dict[str, Any]:
    """Validate Flux query syntax.

    Performs cheap static checks (non-empty, contains a from(bucket:...)
    source) and, when a client is connected, attempts to run the query
    with a limit appended as a smoke test.

    Args:
        query: Flux query text to validate.

    Returns:
        Dict with keys "valid" (bool), "error" (str or None), and
        "message" (human-readable summary). Never raises.
    """
    try:
        # Basic validation - check for required components
        if not query.strip():
            return {
                "valid": False,
                "error": "Empty query",
                "message": "Query cannot be empty"
            }

        if "from(bucket:" not in query:
            return {
                "valid": False,
                "error": "Missing from() function",
                "message": "Flux query must start with from(bucket:...)"
            }

        # Try to execute a dry run if possible.
        # NOTE(review): this "dry run" actually executes the query (with
        # a limit appended), so validation incurs the query's read cost.
        if self.client:
            query_api = self.client.query_api()
            # We could add a limit to make it safe
            test_query = f"{query} |> limit(n: 1)"
            try:
                await query_api.query(test_query)
                return {
                    "valid": True,
                    "error": None,
                    "message": "Query syntax is valid"
                }
            except Exception as e:
                return {
                    "valid": False,
                    "error": str(e),
                    "message": "Query syntax validation failed"
                }

        # No client available: only the static checks above were applied.
        return {
            "valid": True,
            "error": None,
            "message": "Basic syntax validation passed"
        }

    except Exception as e:
        return {
            "valid": False,
            "error": str(e),
            "message": "Query validation error"
        }
|
|
651
|
+
|
|
652
|
+
async def execute_query(self, query: str, limit: Optional[int] = 1000) -> Dict[str, Any]:
    """Execute Flux query against InfluxDB.

    Connects lazily, appends a row limit when none is present, and
    flattens the resulting Flux tables into plain dicts.

    Args:
        query: Flux query text.
        limit: Row cap appended as ``|> limit(n: ...)`` unless the query
            already contains a limit call. Pass None or 0 to disable.

    Returns:
        On success: {"success": True, "data": [...], "columns": [...],
        "record_count": int, "query": str}. On failure: {"success": False,
        "error": str, "query": str}. Never raises.
    """
    try:
        if not self.client:
            await self.connect_database()

        query_api = self.client.query_api()

        # Add limit if specified and not already present
        if limit and "limit(n:" not in query:
            query = f"{query} |> limit(n: {limit})"

        # Execute query
        result = await query_api.query(query)

        # Flatten results; track columns in first-seen order so the
        # "columns" list is deterministic (a plain set was not).
        records = []
        columns: Dict[str, None] = {}

        for table in result:
            for record in table.records:
                record_dict = {}

                # Normalize the standard InfluxDB columns first.
                if record.get_time():
                    record_dict["_time"] = record.get_time().isoformat()
                if record.get_measurement():
                    record_dict["_measurement"] = record.get_measurement()
                if record.get_field():
                    record_dict["_field"] = record.get_field()
                if record.get_value() is not None:
                    record_dict["_value"] = record.get_value()

                # Add all other values (tags, etc.). Skip bookkeeping
                # columns and anything already normalized above — the
                # previous version let raw record values (e.g. a datetime
                # object under "_time") clobber the normalized entries.
                for key, value in record.values.items():
                    if key in ('result', 'table'):
                        continue
                    if key.startswith('_start') or key.startswith('_stop'):
                        continue
                    if key not in record_dict:
                        record_dict[key] = value

                records.append(record_dict)
                columns.update(dict.fromkeys(record_dict))

        return {
            "success": True,
            "data": records,
            "columns": list(columns),
            "record_count": len(records),
            "query": query
        }

    except Exception as e:
        self.logger.error(f"Flux query execution failed: {e}")
        return {
            "success": False,
            "error": str(e),
            "query": query
        }
|
|
708
|
+
|
|
709
|
+
async def close(self):
    """Release the underlying InfluxDB client connection, if any."""
    client = self.client
    if client:
        await client.close()
|
|
713
|
+
|
|
714
|
+
|
|
715
|
+
class FluxQueryExecutionTool(AbstractTool):
    """Tool for executing Flux queries against InfluxDB.

    Thin adapter that delegates to ``InfluxDBAgent.execute_query`` and
    repackages the outcome as a ``ToolResult``.
    """

    name = "execute_flux_query"
    description = "Execute Flux queries against the InfluxDB database"
    args_schema = FluxQueryExecutionArgs

    def __init__(self, agent: InfluxDBAgent, **kwargs):
        # The agent owns the InfluxDB connection; this tool only forwards.
        super().__init__(**kwargs)
        self.agent = agent

    async def _execute(
        self,
        query: str,
        limit: Optional[int] = 1000,
        timeout: int = 30
    ) -> ToolResult:
        """Execute Flux query.

        Args:
            query: Flux query text to run.
            limit: Row cap forwarded to the agent.
            timeout: Recorded in metadata only.
                NOTE(review): timeout is not enforced here — confirm
                whether the agent or transport applies it.

        Returns:
            ToolResult mirroring the agent's execution outcome; failures
            are captured as an error-status result rather than raised.
        """
        try:
            result = await self.agent.execute_query(query, limit)

            return ToolResult(
                status="success" if result["success"] else "error",
                result=result,
                error=result.get("error"),
                metadata={
                    "query": query,
                    "limit": limit,
                    "timeout": timeout
                }
            )

        except Exception as e:
            return ToolResult(
                status="error",
                result=None,
                error=str(e),
                metadata={"query": query}
            )
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
class MeasurementExplorationTool(AbstractTool):
    """Tool for exploring InfluxDB measurements and their metadata.

    Reads the agent's cached schema metadata (extracting it lazily on
    first use) and summarizes buckets, measurements, fields, and tags,
    optionally with sample rows.
    """

    name = "explore_measurements"
    description = "Explore available measurements, fields, and tags in InfluxDB"

    class ExplorationArgs(AbstractToolArgsSchema):
        """Exploration arguments schema."""
        bucket: Optional[str] = Field(default=None, description="Bucket to explore (optional)")
        measurement: Optional[str] = Field(default=None, description="Specific measurement to explore")
        show_sample_data: bool = Field(default=True, description="Include sample data in results")

    args_schema = ExplorationArgs

    def __init__(self, agent: InfluxDBAgent, **kwargs):
        super().__init__(**kwargs)
        self.agent = agent

    async def _execute(
        self,
        bucket: Optional[str] = None,
        measurement: Optional[str] = None,
        show_sample_data: bool = True
    ) -> ToolResult:
        """Explore measurements in InfluxDB.

        Args:
            bucket: Restrict results to this bucket when given.
            measurement: Restrict results to this measurement when given.
            show_sample_data: Attach up to 5 sample rows per measurement.

        Returns:
            ToolResult whose result lists buckets, per-measurement details
            (fields, tags, column counts), and a total count.
        """
        try:
            # Lazily populate the schema cache on first use.
            if not self.agent.schema_metadata:
                await self.agent.extract_schema_metadata()

            exploration_result = {
                "buckets": [],
                "measurements": [],
                "total_measurements": 0
            }

            # Filter by bucket if specified (bucket is stored in .schema).
            tables_to_explore = self.agent.schema_metadata.tables
            if bucket:
                tables_to_explore = [t for t in tables_to_explore if t.schema == bucket]

            # Filter by measurement if specified
            if measurement:
                tables_to_explore = [t for t in tables_to_explore if t.name == measurement]

            # Get unique buckets
            buckets = list(set(t.schema for t in tables_to_explore))
            exploration_result["buckets"] = buckets

            # Build measurement details. Field vs. tag columns are
            # recognized by the "Field:"/"Tag:" description prefixes
            # written when the schema tables were built.
            for table in tables_to_explore:
                measurement_info = {
                    "name": table.name,
                    "bucket": table.schema,
                    "description": table.description,
                    "fields": [col for col in table.columns if col["description"] and col["description"].startswith("Field:")],
                    "tags": [col for col in table.columns if col["description"] and col["description"].startswith("Tag:")],
                    "total_columns": len(table.columns)
                }

                if show_sample_data and table.sample_data:
                    measurement_info["sample_data"] = table.sample_data[:5]  # Limit sample data

                exploration_result["measurements"].append(measurement_info)

            exploration_result["total_measurements"] = len(exploration_result["measurements"])

            return ToolResult(
                status="success",
                result=exploration_result,
                metadata={
                    "bucket_filter": bucket,
                    "measurement_filter": measurement,
                    "show_sample_data": show_sample_data
                }
            )

        except Exception as e:
            return ToolResult(
                status="error",
                result=None,
                error=str(e),
                metadata={
                    "bucket": bucket,
                    "measurement": measurement
                }
            )
|
|
842
|
+
|
|
843
|
+
|
|
844
|
+
# Factory function for creating InfluxDB agents
|
|
845
|
+
def create_influxdb_agent(
    url: str,
    token: str,
    org: str,
    bucket: Optional[str] = None,
    **kwargs
) -> InfluxDBAgent:
    """
    Factory function to create InfluxDB agents.

    Maps the InfluxDB-specific ``url`` onto the agent's generic
    ``connection_string`` parameter.

    Args:
        url: InfluxDB URL (e.g., 'http://localhost:8086')
        token: InfluxDB authentication token
        org: InfluxDB organization
        bucket: Default bucket name (optional)
        **kwargs: Additional arguments for the agent

    Returns:
        Configured InfluxDBAgent instance
    """
    return InfluxDBAgent(
        connection_string=url,
        token=token,
        org=org,
        bucket=bucket,
        **kwargs
    )
|
|
872
|
+
|
|
873
|
+
|
|
874
|
+
# Example usage
|
|
875
|
+
"""
|
|
876
|
+
# Create InfluxDB agent
|
|
877
|
+
influx_agent = create_influxdb_agent(
|
|
878
|
+
url='http://localhost:8086',
|
|
879
|
+
token='your-influxdb-token',
|
|
880
|
+
org='your-org',
|
|
881
|
+
bucket='your-bucket'
|
|
882
|
+
)
|
|
883
|
+
|
|
884
|
+
# Initialize schema
|
|
885
|
+
await influx_agent.initialize_schema()
|
|
886
|
+
|
|
887
|
+
# Generate query from natural language
|
|
888
|
+
result = await influx_agent.generate_query(
|
|
889
|
+
"Show me the average temperature over the last hour grouped by location"
|
|
890
|
+
)
|
|
891
|
+
|
|
892
|
+
# Execute the generated query
|
|
893
|
+
execution_result = await influx_agent.execute_query(result['query'])
|
|
894
|
+
|
|
895
|
+
# Explore available measurements
|
|
896
|
+
exploration_tool = MeasurementExplorationTool(agent=influx_agent)
|
|
897
|
+
exploration_result = await exploration_tool._arun(bucket='my_bucket')
|
|
898
|
+
"""
|