ai-parrot 0.17.2__cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentui/.prettierrc +15 -0
- agentui/QUICKSTART.md +272 -0
- agentui/README.md +59 -0
- agentui/env.example +16 -0
- agentui/jsconfig.json +14 -0
- agentui/package-lock.json +4242 -0
- agentui/package.json +34 -0
- agentui/scripts/postinstall/apply-patches.mjs +260 -0
- agentui/src/app.css +61 -0
- agentui/src/app.d.ts +13 -0
- agentui/src/app.html +12 -0
- agentui/src/components/LoadingSpinner.svelte +64 -0
- agentui/src/components/ThemeSwitcher.svelte +159 -0
- agentui/src/components/index.js +4 -0
- agentui/src/lib/api/bots.ts +60 -0
- agentui/src/lib/api/chat.ts +22 -0
- agentui/src/lib/api/http.ts +25 -0
- agentui/src/lib/components/BotCard.svelte +33 -0
- agentui/src/lib/components/ChatBubble.svelte +63 -0
- agentui/src/lib/components/Toast.svelte +21 -0
- agentui/src/lib/config.ts +20 -0
- agentui/src/lib/stores/auth.svelte.ts +73 -0
- agentui/src/lib/stores/theme.svelte.js +64 -0
- agentui/src/lib/stores/toast.svelte.ts +31 -0
- agentui/src/lib/utils/conversation.ts +39 -0
- agentui/src/routes/+layout.svelte +20 -0
- agentui/src/routes/+page.svelte +232 -0
- agentui/src/routes/login/+page.svelte +200 -0
- agentui/src/routes/talk/[agentId]/+page.svelte +297 -0
- agentui/src/routes/talk/[agentId]/+page.ts +7 -0
- agentui/static/README.md +1 -0
- agentui/svelte.config.js +11 -0
- agentui/tailwind.config.ts +53 -0
- agentui/tsconfig.json +3 -0
- agentui/vite.config.ts +10 -0
- ai_parrot-0.17.2.dist-info/METADATA +472 -0
- ai_parrot-0.17.2.dist-info/RECORD +535 -0
- ai_parrot-0.17.2.dist-info/WHEEL +6 -0
- ai_parrot-0.17.2.dist-info/entry_points.txt +2 -0
- ai_parrot-0.17.2.dist-info/licenses/LICENSE +21 -0
- ai_parrot-0.17.2.dist-info/top_level.txt +6 -0
- crew-builder/.prettierrc +15 -0
- crew-builder/QUICKSTART.md +259 -0
- crew-builder/README.md +113 -0
- crew-builder/env.example +17 -0
- crew-builder/jsconfig.json +14 -0
- crew-builder/package-lock.json +4182 -0
- crew-builder/package.json +37 -0
- crew-builder/scripts/postinstall/apply-patches.mjs +260 -0
- crew-builder/src/app.css +62 -0
- crew-builder/src/app.d.ts +13 -0
- crew-builder/src/app.html +12 -0
- crew-builder/src/components/LoadingSpinner.svelte +64 -0
- crew-builder/src/components/ThemeSwitcher.svelte +149 -0
- crew-builder/src/components/index.js +9 -0
- crew-builder/src/lib/api/bots.ts +60 -0
- crew-builder/src/lib/api/chat.ts +80 -0
- crew-builder/src/lib/api/client.ts +56 -0
- crew-builder/src/lib/api/crew/crew.ts +136 -0
- crew-builder/src/lib/api/index.ts +5 -0
- crew-builder/src/lib/api/o365/auth.ts +65 -0
- crew-builder/src/lib/auth/auth.ts +54 -0
- crew-builder/src/lib/components/AgentNode.svelte +43 -0
- crew-builder/src/lib/components/BotCard.svelte +33 -0
- crew-builder/src/lib/components/ChatBubble.svelte +67 -0
- crew-builder/src/lib/components/ConfigPanel.svelte +278 -0
- crew-builder/src/lib/components/JsonTreeNode.svelte +76 -0
- crew-builder/src/lib/components/JsonViewer.svelte +24 -0
- crew-builder/src/lib/components/MarkdownEditor.svelte +48 -0
- crew-builder/src/lib/components/ThemeToggle.svelte +36 -0
- crew-builder/src/lib/components/Toast.svelte +67 -0
- crew-builder/src/lib/components/Toolbar.svelte +157 -0
- crew-builder/src/lib/components/index.ts +10 -0
- crew-builder/src/lib/config.ts +8 -0
- crew-builder/src/lib/stores/auth.svelte.ts +228 -0
- crew-builder/src/lib/stores/crewStore.ts +369 -0
- crew-builder/src/lib/stores/theme.svelte.js +145 -0
- crew-builder/src/lib/stores/toast.svelte.ts +69 -0
- crew-builder/src/lib/utils/conversation.ts +39 -0
- crew-builder/src/lib/utils/markdown.ts +122 -0
- crew-builder/src/lib/utils/talkHistory.ts +47 -0
- crew-builder/src/routes/+layout.svelte +20 -0
- crew-builder/src/routes/+page.svelte +539 -0
- crew-builder/src/routes/agents/+page.svelte +247 -0
- crew-builder/src/routes/agents/[agentId]/+page.svelte +288 -0
- crew-builder/src/routes/agents/[agentId]/+page.ts +7 -0
- crew-builder/src/routes/builder/+page.svelte +204 -0
- crew-builder/src/routes/crew/ask/+page.svelte +1052 -0
- crew-builder/src/routes/crew/ask/+page.ts +1 -0
- crew-builder/src/routes/integrations/o365/+page.svelte +304 -0
- crew-builder/src/routes/login/+page.svelte +197 -0
- crew-builder/src/routes/talk/[agentId]/+page.svelte +487 -0
- crew-builder/src/routes/talk/[agentId]/+page.ts +7 -0
- crew-builder/static/README.md +1 -0
- crew-builder/svelte.config.js +11 -0
- crew-builder/tailwind.config.ts +53 -0
- crew-builder/tsconfig.json +3 -0
- crew-builder/vite.config.ts +10 -0
- mcp_servers/calculator_server.py +309 -0
- parrot/__init__.py +27 -0
- parrot/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/__pycache__/version.cpython-310.pyc +0 -0
- parrot/_version.py +34 -0
- parrot/a2a/__init__.py +48 -0
- parrot/a2a/client.py +658 -0
- parrot/a2a/discovery.py +89 -0
- parrot/a2a/mixin.py +257 -0
- parrot/a2a/models.py +376 -0
- parrot/a2a/server.py +770 -0
- parrot/agents/__init__.py +29 -0
- parrot/bots/__init__.py +12 -0
- parrot/bots/a2a_agent.py +19 -0
- parrot/bots/abstract.py +3139 -0
- parrot/bots/agent.py +1129 -0
- parrot/bots/basic.py +9 -0
- parrot/bots/chatbot.py +669 -0
- parrot/bots/data.py +1618 -0
- parrot/bots/database/__init__.py +5 -0
- parrot/bots/database/abstract.py +3071 -0
- parrot/bots/database/cache.py +286 -0
- parrot/bots/database/models.py +468 -0
- parrot/bots/database/prompts.py +154 -0
- parrot/bots/database/retries.py +98 -0
- parrot/bots/database/router.py +269 -0
- parrot/bots/database/sql.py +41 -0
- parrot/bots/db/__init__.py +6 -0
- parrot/bots/db/abstract.py +556 -0
- parrot/bots/db/bigquery.py +602 -0
- parrot/bots/db/cache.py +85 -0
- parrot/bots/db/documentdb.py +668 -0
- parrot/bots/db/elastic.py +1014 -0
- parrot/bots/db/influx.py +898 -0
- parrot/bots/db/mock.py +96 -0
- parrot/bots/db/multi.py +783 -0
- parrot/bots/db/prompts.py +185 -0
- parrot/bots/db/sql.py +1255 -0
- parrot/bots/db/tools.py +212 -0
- parrot/bots/document.py +680 -0
- parrot/bots/hrbot.py +15 -0
- parrot/bots/kb.py +170 -0
- parrot/bots/mcp.py +36 -0
- parrot/bots/orchestration/README.md +463 -0
- parrot/bots/orchestration/__init__.py +1 -0
- parrot/bots/orchestration/agent.py +155 -0
- parrot/bots/orchestration/crew.py +3330 -0
- parrot/bots/orchestration/fsm.py +1179 -0
- parrot/bots/orchestration/hr.py +434 -0
- parrot/bots/orchestration/storage/__init__.py +4 -0
- parrot/bots/orchestration/storage/memory.py +100 -0
- parrot/bots/orchestration/storage/mixin.py +119 -0
- parrot/bots/orchestration/verify.py +202 -0
- parrot/bots/product.py +204 -0
- parrot/bots/prompts/__init__.py +96 -0
- parrot/bots/prompts/agents.py +155 -0
- parrot/bots/prompts/data.py +216 -0
- parrot/bots/prompts/output_generation.py +8 -0
- parrot/bots/scraper/__init__.py +3 -0
- parrot/bots/scraper/models.py +122 -0
- parrot/bots/scraper/scraper.py +1173 -0
- parrot/bots/scraper/templates.py +115 -0
- parrot/bots/stores/__init__.py +5 -0
- parrot/bots/stores/local.py +172 -0
- parrot/bots/webdev.py +81 -0
- parrot/cli.py +17 -0
- parrot/clients/__init__.py +16 -0
- parrot/clients/base.py +1491 -0
- parrot/clients/claude.py +1191 -0
- parrot/clients/factory.py +129 -0
- parrot/clients/google.py +4567 -0
- parrot/clients/gpt.py +1975 -0
- parrot/clients/grok.py +432 -0
- parrot/clients/groq.py +986 -0
- parrot/clients/hf.py +582 -0
- parrot/clients/models.py +18 -0
- parrot/conf.py +395 -0
- parrot/embeddings/__init__.py +9 -0
- parrot/embeddings/base.py +157 -0
- parrot/embeddings/google.py +98 -0
- parrot/embeddings/huggingface.py +74 -0
- parrot/embeddings/openai.py +84 -0
- parrot/embeddings/processor.py +88 -0
- parrot/exceptions.c +13868 -0
- parrot/exceptions.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/exceptions.pxd +22 -0
- parrot/exceptions.pxi +15 -0
- parrot/exceptions.pyx +44 -0
- parrot/generators/__init__.py +29 -0
- parrot/generators/base.py +200 -0
- parrot/generators/html.py +293 -0
- parrot/generators/react.py +205 -0
- parrot/generators/streamlit.py +203 -0
- parrot/generators/template.py +105 -0
- parrot/handlers/__init__.py +4 -0
- parrot/handlers/agent.py +861 -0
- parrot/handlers/agents/__init__.py +1 -0
- parrot/handlers/agents/abstract.py +900 -0
- parrot/handlers/bots.py +338 -0
- parrot/handlers/chat.py +915 -0
- parrot/handlers/creation.sql +192 -0
- parrot/handlers/crew/ARCHITECTURE.md +362 -0
- parrot/handlers/crew/README_BOTMANAGER_PERSISTENCE.md +303 -0
- parrot/handlers/crew/README_REDIS_PERSISTENCE.md +366 -0
- parrot/handlers/crew/__init__.py +0 -0
- parrot/handlers/crew/handler.py +801 -0
- parrot/handlers/crew/models.py +229 -0
- parrot/handlers/crew/redis_persistence.py +523 -0
- parrot/handlers/jobs/__init__.py +10 -0
- parrot/handlers/jobs/job.py +384 -0
- parrot/handlers/jobs/mixin.py +627 -0
- parrot/handlers/jobs/models.py +115 -0
- parrot/handlers/jobs/worker.py +31 -0
- parrot/handlers/models.py +596 -0
- parrot/handlers/o365_auth.py +105 -0
- parrot/handlers/stream.py +337 -0
- parrot/interfaces/__init__.py +6 -0
- parrot/interfaces/aws.py +143 -0
- parrot/interfaces/credentials.py +113 -0
- parrot/interfaces/database.py +27 -0
- parrot/interfaces/google.py +1123 -0
- parrot/interfaces/hierarchy.py +1227 -0
- parrot/interfaces/http.py +651 -0
- parrot/interfaces/images/__init__.py +0 -0
- parrot/interfaces/images/plugins/__init__.py +24 -0
- parrot/interfaces/images/plugins/abstract.py +58 -0
- parrot/interfaces/images/plugins/analisys.py +148 -0
- parrot/interfaces/images/plugins/classify.py +150 -0
- parrot/interfaces/images/plugins/classifybase.py +182 -0
- parrot/interfaces/images/plugins/detect.py +150 -0
- parrot/interfaces/images/plugins/exif.py +1103 -0
- parrot/interfaces/images/plugins/hash.py +52 -0
- parrot/interfaces/images/plugins/vision.py +104 -0
- parrot/interfaces/images/plugins/yolo.py +66 -0
- parrot/interfaces/images/plugins/zerodetect.py +197 -0
- parrot/interfaces/o365.py +978 -0
- parrot/interfaces/onedrive.py +822 -0
- parrot/interfaces/sharepoint.py +1435 -0
- parrot/interfaces/soap.py +257 -0
- parrot/loaders/__init__.py +8 -0
- parrot/loaders/abstract.py +1131 -0
- parrot/loaders/audio.py +199 -0
- parrot/loaders/basepdf.py +53 -0
- parrot/loaders/basevideo.py +1568 -0
- parrot/loaders/csv.py +409 -0
- parrot/loaders/docx.py +116 -0
- parrot/loaders/epubloader.py +316 -0
- parrot/loaders/excel.py +199 -0
- parrot/loaders/factory.py +55 -0
- parrot/loaders/files/__init__.py +0 -0
- parrot/loaders/files/abstract.py +39 -0
- parrot/loaders/files/html.py +26 -0
- parrot/loaders/files/text.py +63 -0
- parrot/loaders/html.py +152 -0
- parrot/loaders/markdown.py +442 -0
- parrot/loaders/pdf.py +373 -0
- parrot/loaders/pdfmark.py +320 -0
- parrot/loaders/pdftables.py +506 -0
- parrot/loaders/ppt.py +476 -0
- parrot/loaders/qa.py +63 -0
- parrot/loaders/splitters/__init__.py +10 -0
- parrot/loaders/splitters/base.py +138 -0
- parrot/loaders/splitters/md.py +228 -0
- parrot/loaders/splitters/token.py +143 -0
- parrot/loaders/txt.py +26 -0
- parrot/loaders/video.py +89 -0
- parrot/loaders/videolocal.py +218 -0
- parrot/loaders/videounderstanding.py +377 -0
- parrot/loaders/vimeo.py +167 -0
- parrot/loaders/web.py +599 -0
- parrot/loaders/youtube.py +504 -0
- parrot/manager/__init__.py +5 -0
- parrot/manager/manager.py +1030 -0
- parrot/mcp/__init__.py +28 -0
- parrot/mcp/adapter.py +105 -0
- parrot/mcp/cli.py +174 -0
- parrot/mcp/client.py +119 -0
- parrot/mcp/config.py +75 -0
- parrot/mcp/integration.py +842 -0
- parrot/mcp/oauth.py +933 -0
- parrot/mcp/server.py +225 -0
- parrot/mcp/transports/__init__.py +3 -0
- parrot/mcp/transports/base.py +279 -0
- parrot/mcp/transports/grpc_session.py +163 -0
- parrot/mcp/transports/http.py +312 -0
- parrot/mcp/transports/mcp.proto +108 -0
- parrot/mcp/transports/quic.py +1082 -0
- parrot/mcp/transports/sse.py +330 -0
- parrot/mcp/transports/stdio.py +309 -0
- parrot/mcp/transports/unix.py +395 -0
- parrot/mcp/transports/websocket.py +547 -0
- parrot/memory/__init__.py +16 -0
- parrot/memory/abstract.py +209 -0
- parrot/memory/agent.py +32 -0
- parrot/memory/cache.py +175 -0
- parrot/memory/core.py +555 -0
- parrot/memory/file.py +153 -0
- parrot/memory/mem.py +131 -0
- parrot/memory/redis.py +613 -0
- parrot/models/__init__.py +46 -0
- parrot/models/basic.py +118 -0
- parrot/models/compliance.py +208 -0
- parrot/models/crew.py +395 -0
- parrot/models/detections.py +654 -0
- parrot/models/generation.py +85 -0
- parrot/models/google.py +223 -0
- parrot/models/groq.py +23 -0
- parrot/models/openai.py +30 -0
- parrot/models/outputs.py +285 -0
- parrot/models/responses.py +938 -0
- parrot/notifications/__init__.py +743 -0
- parrot/openapi/__init__.py +3 -0
- parrot/openapi/components.yaml +641 -0
- parrot/openapi/config.py +322 -0
- parrot/outputs/__init__.py +32 -0
- parrot/outputs/formats/__init__.py +108 -0
- parrot/outputs/formats/altair.py +359 -0
- parrot/outputs/formats/application.py +122 -0
- parrot/outputs/formats/base.py +351 -0
- parrot/outputs/formats/bokeh.py +356 -0
- parrot/outputs/formats/card.py +424 -0
- parrot/outputs/formats/chart.py +436 -0
- parrot/outputs/formats/d3.py +255 -0
- parrot/outputs/formats/echarts.py +310 -0
- parrot/outputs/formats/generators/__init__.py +0 -0
- parrot/outputs/formats/generators/abstract.py +61 -0
- parrot/outputs/formats/generators/panel.py +145 -0
- parrot/outputs/formats/generators/streamlit.py +86 -0
- parrot/outputs/formats/generators/terminal.py +63 -0
- parrot/outputs/formats/holoviews.py +310 -0
- parrot/outputs/formats/html.py +147 -0
- parrot/outputs/formats/jinja2.py +46 -0
- parrot/outputs/formats/json.py +87 -0
- parrot/outputs/formats/map.py +933 -0
- parrot/outputs/formats/markdown.py +172 -0
- parrot/outputs/formats/matplotlib.py +237 -0
- parrot/outputs/formats/mixins/__init__.py +0 -0
- parrot/outputs/formats/mixins/emaps.py +855 -0
- parrot/outputs/formats/plotly.py +341 -0
- parrot/outputs/formats/seaborn.py +310 -0
- parrot/outputs/formats/table.py +397 -0
- parrot/outputs/formats/template_report.py +138 -0
- parrot/outputs/formats/yaml.py +125 -0
- parrot/outputs/formatter.py +152 -0
- parrot/outputs/templates/__init__.py +95 -0
- parrot/pipelines/__init__.py +0 -0
- parrot/pipelines/abstract.py +210 -0
- parrot/pipelines/detector.py +124 -0
- parrot/pipelines/models.py +90 -0
- parrot/pipelines/planogram.py +3002 -0
- parrot/pipelines/table.sql +97 -0
- parrot/plugins/__init__.py +106 -0
- parrot/plugins/importer.py +80 -0
- parrot/py.typed +0 -0
- parrot/registry/__init__.py +18 -0
- parrot/registry/registry.py +594 -0
- parrot/scheduler/__init__.py +1189 -0
- parrot/scheduler/models.py +60 -0
- parrot/security/__init__.py +16 -0
- parrot/security/prompt_injection.py +268 -0
- parrot/security/security_events.sql +25 -0
- parrot/services/__init__.py +1 -0
- parrot/services/mcp/__init__.py +8 -0
- parrot/services/mcp/config.py +13 -0
- parrot/services/mcp/server.py +295 -0
- parrot/services/o365_remote_auth.py +235 -0
- parrot/stores/__init__.py +7 -0
- parrot/stores/abstract.py +352 -0
- parrot/stores/arango.py +1090 -0
- parrot/stores/bigquery.py +1377 -0
- parrot/stores/cache.py +106 -0
- parrot/stores/empty.py +10 -0
- parrot/stores/faiss_store.py +1157 -0
- parrot/stores/kb/__init__.py +9 -0
- parrot/stores/kb/abstract.py +68 -0
- parrot/stores/kb/cache.py +165 -0
- parrot/stores/kb/doc.py +325 -0
- parrot/stores/kb/hierarchy.py +346 -0
- parrot/stores/kb/local.py +457 -0
- parrot/stores/kb/prompt.py +28 -0
- parrot/stores/kb/redis.py +659 -0
- parrot/stores/kb/store.py +115 -0
- parrot/stores/kb/user.py +374 -0
- parrot/stores/models.py +59 -0
- parrot/stores/pgvector.py +3 -0
- parrot/stores/postgres.py +2853 -0
- parrot/stores/utils/__init__.py +0 -0
- parrot/stores/utils/chunking.py +197 -0
- parrot/telemetry/__init__.py +3 -0
- parrot/telemetry/mixin.py +111 -0
- parrot/template/__init__.py +3 -0
- parrot/template/engine.py +259 -0
- parrot/tools/__init__.py +23 -0
- parrot/tools/abstract.py +644 -0
- parrot/tools/agent.py +363 -0
- parrot/tools/arangodbsearch.py +537 -0
- parrot/tools/arxiv_tool.py +188 -0
- parrot/tools/calculator/__init__.py +3 -0
- parrot/tools/calculator/operations/__init__.py +38 -0
- parrot/tools/calculator/operations/calculus.py +80 -0
- parrot/tools/calculator/operations/statistics.py +76 -0
- parrot/tools/calculator/tool.py +150 -0
- parrot/tools/cloudwatch.py +988 -0
- parrot/tools/codeinterpreter/__init__.py +127 -0
- parrot/tools/codeinterpreter/executor.py +371 -0
- parrot/tools/codeinterpreter/internals.py +473 -0
- parrot/tools/codeinterpreter/models.py +643 -0
- parrot/tools/codeinterpreter/prompts.py +224 -0
- parrot/tools/codeinterpreter/tool.py +664 -0
- parrot/tools/company_info/__init__.py +6 -0
- parrot/tools/company_info/tool.py +1138 -0
- parrot/tools/correlationanalysis.py +437 -0
- parrot/tools/database/abstract.py +286 -0
- parrot/tools/database/bq.py +115 -0
- parrot/tools/database/cache.py +284 -0
- parrot/tools/database/models.py +95 -0
- parrot/tools/database/pg.py +343 -0
- parrot/tools/databasequery.py +1159 -0
- parrot/tools/db.py +1800 -0
- parrot/tools/ddgo.py +370 -0
- parrot/tools/decorators.py +271 -0
- parrot/tools/dftohtml.py +282 -0
- parrot/tools/document.py +549 -0
- parrot/tools/ecs.py +819 -0
- parrot/tools/edareport.py +368 -0
- parrot/tools/elasticsearch.py +1049 -0
- parrot/tools/employees.py +462 -0
- parrot/tools/epson/__init__.py +96 -0
- parrot/tools/excel.py +683 -0
- parrot/tools/file/__init__.py +13 -0
- parrot/tools/file/abstract.py +76 -0
- parrot/tools/file/gcs.py +378 -0
- parrot/tools/file/local.py +284 -0
- parrot/tools/file/s3.py +511 -0
- parrot/tools/file/tmp.py +309 -0
- parrot/tools/file/tool.py +501 -0
- parrot/tools/file_reader.py +129 -0
- parrot/tools/flowtask/__init__.py +19 -0
- parrot/tools/flowtask/tool.py +761 -0
- parrot/tools/gittoolkit.py +508 -0
- parrot/tools/google/__init__.py +18 -0
- parrot/tools/google/base.py +169 -0
- parrot/tools/google/tools.py +1251 -0
- parrot/tools/googlelocation.py +5 -0
- parrot/tools/googleroutes.py +5 -0
- parrot/tools/googlesearch.py +5 -0
- parrot/tools/googlesitesearch.py +5 -0
- parrot/tools/googlevoice.py +2 -0
- parrot/tools/gvoice.py +695 -0
- parrot/tools/ibisworld/README.md +225 -0
- parrot/tools/ibisworld/__init__.py +11 -0
- parrot/tools/ibisworld/tool.py +366 -0
- parrot/tools/jiratoolkit.py +1718 -0
- parrot/tools/manager.py +1098 -0
- parrot/tools/math.py +152 -0
- parrot/tools/metadata.py +476 -0
- parrot/tools/msteams.py +1621 -0
- parrot/tools/msword.py +635 -0
- parrot/tools/multidb.py +580 -0
- parrot/tools/multistoresearch.py +369 -0
- parrot/tools/networkninja.py +167 -0
- parrot/tools/nextstop/__init__.py +4 -0
- parrot/tools/nextstop/base.py +286 -0
- parrot/tools/nextstop/employee.py +733 -0
- parrot/tools/nextstop/store.py +462 -0
- parrot/tools/notification.py +435 -0
- parrot/tools/o365/__init__.py +42 -0
- parrot/tools/o365/base.py +295 -0
- parrot/tools/o365/bundle.py +522 -0
- parrot/tools/o365/events.py +554 -0
- parrot/tools/o365/mail.py +992 -0
- parrot/tools/o365/onedrive.py +497 -0
- parrot/tools/o365/sharepoint.py +641 -0
- parrot/tools/openapi_toolkit.py +904 -0
- parrot/tools/openweather.py +527 -0
- parrot/tools/pdfprint.py +1001 -0
- parrot/tools/powerbi.py +518 -0
- parrot/tools/powerpoint.py +1113 -0
- parrot/tools/pricestool.py +146 -0
- parrot/tools/products/__init__.py +246 -0
- parrot/tools/prophet_tool.py +171 -0
- parrot/tools/pythonpandas.py +630 -0
- parrot/tools/pythonrepl.py +910 -0
- parrot/tools/qsource.py +436 -0
- parrot/tools/querytoolkit.py +395 -0
- parrot/tools/quickeda.py +827 -0
- parrot/tools/resttool.py +553 -0
- parrot/tools/retail/__init__.py +0 -0
- parrot/tools/retail/bby.py +528 -0
- parrot/tools/sandboxtool.py +703 -0
- parrot/tools/sassie/__init__.py +352 -0
- parrot/tools/scraping/__init__.py +7 -0
- parrot/tools/scraping/docs/select.md +466 -0
- parrot/tools/scraping/documentation.md +1278 -0
- parrot/tools/scraping/driver.py +436 -0
- parrot/tools/scraping/models.py +576 -0
- parrot/tools/scraping/options.py +85 -0
- parrot/tools/scraping/orchestrator.py +517 -0
- parrot/tools/scraping/readme.md +740 -0
- parrot/tools/scraping/tool.py +3115 -0
- parrot/tools/seasonaldetection.py +642 -0
- parrot/tools/shell_tool/__init__.py +5 -0
- parrot/tools/shell_tool/actions.py +408 -0
- parrot/tools/shell_tool/engine.py +155 -0
- parrot/tools/shell_tool/models.py +322 -0
- parrot/tools/shell_tool/tool.py +442 -0
- parrot/tools/site_search.py +214 -0
- parrot/tools/textfile.py +418 -0
- parrot/tools/think.py +378 -0
- parrot/tools/toolkit.py +298 -0
- parrot/tools/webapp_tool.py +187 -0
- parrot/tools/whatif.py +1279 -0
- parrot/tools/workday/MULTI_WSDL_EXAMPLE.md +249 -0
- parrot/tools/workday/__init__.py +6 -0
- parrot/tools/workday/models.py +1389 -0
- parrot/tools/workday/tool.py +1293 -0
- parrot/tools/yfinance_tool.py +306 -0
- parrot/tools/zipcode.py +217 -0
- parrot/utils/__init__.py +2 -0
- parrot/utils/helpers.py +73 -0
- parrot/utils/parsers/__init__.py +5 -0
- parrot/utils/parsers/toml.c +12078 -0
- parrot/utils/parsers/toml.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/parsers/toml.pyx +21 -0
- parrot/utils/toml.py +11 -0
- parrot/utils/types.cpp +20936 -0
- parrot/utils/types.cpython-310-x86_64-linux-gnu.so +0 -0
- parrot/utils/types.pyx +213 -0
- parrot/utils/uv.py +11 -0
- parrot/version.py +10 -0
- parrot/yaml-rs/Cargo.lock +350 -0
- parrot/yaml-rs/Cargo.toml +19 -0
- parrot/yaml-rs/pyproject.toml +19 -0
- parrot/yaml-rs/python/yaml_rs/__init__.py +81 -0
- parrot/yaml-rs/src/lib.rs +222 -0
- requirements/docker-compose.yml +24 -0
- requirements/requirements-dev.txt +21 -0
|
@@ -0,0 +1,1718 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Jira Toolkit - A unified toolkit for Jira operations using pycontribs/jira.
|
|
3
|
+
|
|
4
|
+
This toolkit wraps common Jira actions as async tools, extending AbstractToolkit.
|
|
5
|
+
It supports multiple authentication modes on init: basic_auth, token_auth, and OAuth1.
|
|
6
|
+
|
|
7
|
+
Dependencies:
|
|
8
|
+
- jira (pycontribs/jira)
|
|
9
|
+
- pydantic
|
|
10
|
+
- navconfig (optional, for pulling default config values)
|
|
11
|
+
|
|
12
|
+
Example usage:
|
|
13
|
+
toolkit = JiraToolkit(
|
|
14
|
+
server_url="https://your-domain.atlassian.net",
|
|
15
|
+
auth_type="token_auth",
|
|
16
|
+
username="you@example.com",
|
|
17
|
+
token="<PAT>",
|
|
18
|
+
default_project="JRA"
|
|
19
|
+
)
|
|
20
|
+
tools = toolkit.get_tools()
|
|
21
|
+
issue = await toolkit.jira_get_issue("JRA-1330")
|
|
22
|
+
|
|
23
|
+
Notes:
|
|
24
|
+
- All public async methods become tools via AbstractToolkit.
|
|
25
|
+
- Methods are async but the underlying jira client is sync, so calls run via asyncio.to_thread.
|
|
26
|
+
- Each method returns JSON-serializable dicts/lists (using Issue.raw where possible).
|
|
27
|
+
"""
|
|
28
|
+
from __future__ import annotations
|
|
29
|
+
from typing import Any, Dict, List, Optional, Union, Literal
|
|
30
|
+
import os
|
|
31
|
+
import logging
|
|
32
|
+
import asyncio
|
|
33
|
+
import importlib
|
|
34
|
+
from pydantic import BaseModel, Field
|
|
35
|
+
import pandas as pd
|
|
36
|
+
|
|
37
|
+
try:
|
|
38
|
+
# Optional config source; fall back to env vars if missing
|
|
39
|
+
from navconfig import config as nav_config # type: ignore
|
|
40
|
+
except Exception: # pragma: no cover - optional
|
|
41
|
+
nav_config = None
|
|
42
|
+
|
|
43
|
+
try:
|
|
44
|
+
from jira import JIRA
|
|
45
|
+
except ImportError as e: # pragma: no cover - optional
|
|
46
|
+
raise ImportError(
|
|
47
|
+
"Please install the 'jira' package: pip install jira"
|
|
48
|
+
) from e
|
|
49
|
+
|
|
50
|
+
from .toolkit import AbstractToolkit
|
|
51
|
+
from .decorators import tool_schema
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
# -----------------------------
|
|
55
|
+
# Input models (schemas)
|
|
56
|
+
# -----------------------------
|
|
57
|
+
# JSON-schema fragment attached (via `json_schema_extra`) to the optional
# `structured` field on tool inputs. It documents, for the LLM/agent side,
# the shaping options that StructuredOutputOptions accepts.
STRUCTURED_OUTPUT_FIELD_SCHEMA: Dict[str, Any] = {
    "type": "object",
    "properties": {
        prop_name: prop_schema
        for prop_name, prop_schema in (
            (
                "include",
                {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Whitelist of dot-paths to include",
                },
            ),
            (
                "mapping",
                {
                    "type": "object",
                    "description": "dest_key -> dot-path mapping",
                    "additionalProperties": {"type": "string"},
                },
            ),
            (
                "model_path",
                {
                    "type": "string",
                    "description": "Dotted path to a Pydantic BaseModel subclass",
                },
            ),
            (
                "strict",
                {
                    "type": "boolean",
                    "description": "If True, missing paths raise; otherwise they become None",
                },
            ),
        )
    },
}
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class StructuredOutputOptions(BaseModel):
    """Options to shape the output of Jira items into either a whitelist or a Pydantic model.

    You can:
    - provide `include` as a list of dot-paths to keep (e.g., ["key", "fields.summary", "fields.assignee.displayName"]).
    - OR provide `mapping` as {dest_key: dot_path} to rename/flatten fields.
    - OR provide `model_path` as a dotted import path to a BaseModel subclass. We will validate and return `model_dump()`.

    If more than one is provided, precedence is: mapping > include > model_path (mapping/include are applied before model).
    """
    # These four fields are mirrored by STRUCTURED_OUTPUT_FIELD_SCHEMA, which
    # tool inputs (GetIssueInput, SearchIssuesInput) attach via `json_schema_extra`.
    include: Optional[List[str]] = Field(default=None, description="Whitelist of dot-paths to include")
    mapping: Optional[Dict[str, str]] = Field(default=None, description="dest_key -> dot-path mapping")
    model_path: Optional[str] = Field(default=None, description="Dotted path to a Pydantic BaseModel subclass")
    # When strict is False, unresolved dot-paths yield None instead of raising.
    strict: bool = Field(default=False, description="If True, missing paths raise; otherwise they become None")
|
|
98
|
+
|
|
99
|
+
# =============================================================================
|
|
100
|
+
# Field Presets for Efficiency
|
|
101
|
+
# =============================================================================
|
|
102
|
+
|
|
103
|
+
# Ready-made comma-separated `fields` strings, keyed by use case.
# Requesting fewer fields keeps Jira responses fast and payloads small.
FIELD_PRESETS = {
    # Minimal fields for counting
    "count": ",".join((
        "key", "assignee", "reporter", "status",
        "priority", "issuetype", "project", "created",
    )),
    # Fields for listing/browsing
    "list": ",".join((
        "key", "summary", "assignee", "status", "priority",
        "issuetype", "project", "created", "updated",
    )),
    # Fields for detailed analysis (customfield_10016 = story points)
    "analysis": ",".join((
        "key", "summary", "description", "assignee", "reporter",
        "status", "priority", "issuetype", "project", "created",
        "updated", "resolutiondate", "duedate", "labels", "components",
        "timeoriginalestimate", "timespent", "customfield_10016",
    )),
    # All fields
    "all": "*all",
}

# Type hint for presets: the accepted preset names, matching FIELD_PRESETS keys.
FieldPreset = Literal["count", "list", "analysis", "all"]
|
|
123
|
+
|
|
124
|
+
class JiraInput(BaseModel):
    """Default input for Jira tools: holds auth + default project context.

    You usually do **not** pass this into every call; it's used to configure the
    toolkit on initialization. It's defined here for consistency and as a type
    you can reuse when wiring the toolkit into agents.
    """

    server_url: str = Field(description="Base URL for Jira server (e.g., https://your.atlassian.net)")
    # Selects which credential group below is consulted.
    auth_type: str = Field(
        description="Authentication type: 'basic_auth', 'token_auth', or 'oauth'",
        default="token_auth",
    )
    # Credentials for basic_auth / token_auth modes.
    username: Optional[str] = Field(default=None, description="Username (email) for basic/token auth")
    password: Optional[str] = Field(default=None, description="Password for basic auth (or API token)")
    token: Optional[str] = Field(default=None, description="Personal Access Token for token_auth")

    # OAuth1 params (pycontribs JIRA OAuth1)
    oauth_consumer_key: Optional[str] = None
    # NOTE(review): may hold either the PEM content itself or a filesystem
    # path to it, per the description — consumers must handle both.
    oauth_key_cert: Optional[str] = Field(default=None, description="PEM private key content or path")
    oauth_access_token: Optional[str] = None
    oauth_access_token_secret: Optional[str] = None

    # Default project context
    default_project: Optional[str] = Field(default=None, description="Default project key, e.g., 'JRA'")
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
class GetIssueInput(BaseModel):
    """Input for getting a single issue."""
    issue: str = Field(description="Issue key or id, e.g., 'JRA-1330'")
    fields: Optional[str] = Field(default=None, description="Fields to fetch (comma-separated) or '*' ")
    expand: Optional[str] = Field(default=None, description="Entities to expand, e.g. 'renderedFields' ")
    # Optional post-processing of the fetched issue into a whitelisted /
    # flattened / model-validated shape; see StructuredOutputOptions.
    structured: Optional[StructuredOutputOptions] = Field(
        default=None,
        description="Optional structured output mapping",
        json_schema_extra=STRUCTURED_OUTPUT_FIELD_SCHEMA
    )
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
class SearchIssuesInput(BaseModel):
    """Input for searching issues with JQL.

    Besides the usual JQL/pagination knobs, this input exposes efficiency
    options (field narrowing, summary-only mode, DataFrame hand-off) aimed at
    keeping responses small when used from an LLM agent.
    """
    jql: str = Field(description="JQL query, e.g. 'project=PROJ and assignee != currentUser()'")
    start_at: int = Field(default=0, description="Start index for pagination")
    # None means "fetch everything" (complete counts); 100 is a browsing default.
    max_results: Optional[int] = Field(
        default=100,
        description=(
            "Max results to return. Set to None to fetch all matching issues. "
            "Jira supports up to 1000 per page. "
            "Default 100 is for browsing; use None for complete counts."
        )
    )
    fields: Optional[str] = Field(
        default=None,
        description=(
            "Fields to return (comma-separated). Use minimal fields for efficiency: "
            "'key,assignee,status,priority' for counts, "
            "'key,summary,assignee,status,created' for listings, "
            "'*all' or None for full details. "
            "Fewer fields = faster response and smaller context."
        )
    )
    expand: Optional[str] = Field(
        default=None,
        description="Expand options (changelog, renderedFields, etc.)"
    )
    # Optional reshaping of each returned issue; see StructuredOutputOptions.
    structured: Optional[StructuredOutputOptions] = Field(
        default=None,
        description="Optional structured output mapping",
        json_schema_extra=STRUCTURED_OUTPUT_FIELD_SCHEMA
    )
    # Options for efficient handling
    # NOTE(review): the second sentence of this description appears copy-pasted
    # from `store_as_dataframe` below and doesn't match "JSON object vs list" —
    # confirm intended wording against the handler that consumes json_result.
    json_result: bool = Field(
        default=True,
        description=(
            "Return results as a JSON object instead of a list of issues. "
            "Set True when you need to do aggregations, grouping, or complex analysis."
        )
    )
    store_as_dataframe: bool = Field(
        default=False,
        description=(
            "Store results in a shared DataFrame for analysis with PythonPandasTool. "
            "Set True when you need to do aggregations, grouping, or complex analysis."
        )
    )
    dataframe_name: Optional[str] = Field(
        default=None,
        description="Name for the stored DataFrame. Defaults to 'jira_issues'."
    )
    # Summary mode trades raw issues for aggregate counts to save context.
    summary_only: bool = Field(
        default=False,
        description=(
            "Return only summary statistics (counts by assignee, status, etc.) "
            "instead of raw issues. Ideal for 'how many' or 'count by' queries. "
            "Drastically reduces context window usage."
        )
    )
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
class CountIssuesInput(BaseModel):
    """Optimized input for counting issues - requests minimal fields."""

    # The query itself is mandatory; grouping dimensions are optional.
    jql: str = Field(description="JQL query to count issues")
    group_by: Optional[List[str]] = Field(
        default=None,
        description=(
            "Fields to group counts by. Options: "
            "'assignee', 'reporter', 'status', 'priority', 'issuetype', 'project'. "
            "Example: ['assignee', 'status'] for count by user and status."
        ),
    )
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
class AggregateJiraDataInput(BaseModel):
    """Input for aggregating stored Jira data."""

    dataframe_name: str = Field(default="jira_issues", description="Name of the DataFrame to aggregate")
    group_by: List[str] = Field(description="Columns to group by, e.g. ['assignee_name', 'status']")
    # NOTE: mutable default -- pydantic copies field defaults per instance,
    # so this dict is not shared between model instances.
    aggregations: Dict[str, str] = Field(
        default={"key": "count"},
        description=(
            "Aggregations to perform. Format: {column: agg_func}. "
            "Example: {'key': 'count', 'story_points': 'sum'}"
        ),
    )
    sort_by: Optional[str] = Field(default=None, description="Column to sort results by")
    ascending: bool = Field(default=False, description="Sort order")
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
class TransitionIssueInput(BaseModel):
    """Input for transitioning an issue."""

    issue: str = Field(description="Issue key or id")
    transition: Union[str, int] = Field(description="Transition id or name (e.g., '5' or 'Done')")
    # The remaining fields are optional extras forwarded to the transition call.
    fields: Optional[Dict[str, Any]] = Field(default=None, description="Extra fields to set on transition")
    assignee: Optional[Dict[str, Any]] = Field(default=None, description="Assignee dict, e.g., {'name': 'pm_user'}")
    resolution: Optional[Dict[str, Any]] = Field(default=None, description="Resolution dict, e.g., {'id': '3'}")
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
class AddAttachmentInput(BaseModel):
    """Input for adding an attachment to an issue."""

    issue: str = Field(description="Issue key or id")
    attachment: str = Field(description="Path to attachment file on disk")
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
class AssignIssueInput(BaseModel):
    """Input for assigning an issue to a user."""

    issue: str = Field(description="Issue key or id")
    assignee: str = Field(description="Account id or username (depends on Jira cloud/server)")
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
class CreateIssueInput(BaseModel):
    """Input for creating a new issue."""

    # Required identification of the new issue.
    project: str = Field(description="Project key, e.g. 'NAV' or project id")
    summary: str = Field(description="Issue summary/title")
    issuetype: str = Field(
        default="Task",
        description="Issue type name: 'Epic', 'Story', 'Bug', 'Task', 'Sub-task', etc.",
    )
    # Optional metadata.
    description: Optional[str] = Field(default=None, description="Issue description")
    assignee: Optional[str] = Field(default=None, description="Assignee account ID or username")
    priority: Optional[str] = Field(
        default=None,
        description="Priority name: 'Highest', 'High', 'Medium', 'Low', 'Lowest'",
    )
    labels: Optional[List[str]] = Field(default=None, description="Labels list, e.g. ['backend', 'urgent']")
    due_date: Optional[str] = Field(default=None, description="Due date in YYYY-MM-DD format")
    parent: Optional[str] = Field(
        default=None,
        description="Parent issue key for sub-tasks or stories under epics",
    )
    original_estimate: Optional[str] = Field(
        default=None,
        description="Original time estimate, e.g. '8h', '2d', '30m'",
    )
    # Generic fields for any other issue data
    fields: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Additional fields dict for custom or less common fields",
    )
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
class UpdateIssueInput(BaseModel):
    """Input for updating an existing issue."""

    issue: str = Field(description="Issue key or id")
    summary: Optional[str] = Field(default=None, description="New summary")
    description: Optional[str] = Field(default=None, description="New description")
    assignee: Optional[Dict[str, Any]] = Field(default=None, description="New assignee dict, e.g. {'accountId': '...'}")

    # New fields
    acceptance_criteria: Optional[str] = Field(
        default=None,
        description="Acceptance criteria text (often stored in a custom field)",
    )
    original_estimate: Optional[str] = Field(
        default=None,
        description="Original time estimate, e.g. '2h', '1d', '30m'",
    )
    time_tracking: Optional[Dict[str, str]] = Field(
        default=None,
        description="Time tracking dict, e.g. {'originalEstimate': '2h', 'remainingEstimate': '1h'}",
    )
    affected_versions: Optional[List[Dict[str, str]]] = Field(
        default=None,
        description="Affected versions list, e.g. [{'name': '1.0'}, {'name': '2.0'}]",
    )
    due_date: Optional[str] = Field(default=None, description="Due date in YYYY-MM-DD format")
    labels: Optional[List[str]] = Field(default=None, description="Labels list, e.g. ['backend', 'priority']")
    issuetype: Optional[Dict[str, str]] = Field(
        default=None,
        description="Issue type dict, e.g. {'name': 'Bug'} or {'id': '10001'}",
    )
    priority: Optional[Dict[str, str]] = Field(
        default=None,
        description="Priority dict, e.g. {'name': 'High'} or {'id': '2'}",
    )

    # Generic fields for any other updates
    fields: Optional[Dict[str, Any]] = Field(default=None, description="Arbitrary field updates dict")
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
class FindIssuesByAssigneeInput(BaseModel):
    """Input for finding issues assigned to a given user."""

    assignee: str = Field(description="Assignee identifier (e.g., 'admin' or accountId)")
    project: Optional[str] = Field(default=None, description="Restrict to project key")
    max_results: int = Field(default=50, description="Max results")
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
class GetTransitionsInput(BaseModel):
    """Input for getting available transitions for an issue."""

    issue: str = Field(description="Issue key or id")
    expand: Optional[str] = Field(default=None, description="Expand options, e.g. 'transitions.fields'")
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
class AddCommentInput(BaseModel):
    """Input for adding a comment to an issue."""

    issue: str = Field(description="Issue key or id")
    body: str = Field(description="Comment body text")
    is_internal: bool = Field(default=False, description="If true, mark as internal (Service Desk)")
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
class AddWorklogInput(BaseModel):
    """Input for adding a worklog to an issue."""

    issue: str = Field(description="Issue key or id")
    time_spent: str = Field(description="Time spent, e.g. '2h', '30m'")
    comment: Optional[str] = Field(default=None, description="Worklog comment")
    started: Optional[str] = Field(default=None, description="Date started (ISO-8601 or similar)")
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
class GetIssueTypesInput(BaseModel):
    """Input for listing issue types."""

    project: Optional[str] = Field(default=None, description="Project key to filter by. If omitted, returns all available types.")
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
class SearchUsersInput(BaseModel):
    """Input for searching users."""

    user: Optional[str] = Field(default=None, description="String to match usernames, name or email against.")
    # Paging controls.
    start_at: int = Field(default=0, description="Index of the first user to return.")
    max_results: int = Field(default=50, description="Maximum number of users to return.")
    # Activity filters.
    include_active: bool = Field(default=True, description="True to include active users.")
    include_inactive: bool = Field(default=False, description="True to include inactive users.")
    query: Optional[str] = Field(default=None, description="Search term. It can just be the email.")
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
class GetProjectsInput(BaseModel):
    """Input for listing projects."""

    # No parameters: the call always returns every visible project.
    pass
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
# -----------------------------
|
|
430
|
+
# Toolkit implementation
|
|
431
|
+
# -----------------------------
|
|
432
|
+
class JiraToolkit(AbstractToolkit):
|
|
433
|
+
"""Toolkit for interacting with Jira via pycontribs/jira.
|
|
434
|
+
|
|
435
|
+
Provides methods for:
|
|
436
|
+
- Getting an issue
|
|
437
|
+
- Searching issues
|
|
438
|
+
- Transitioning issues
|
|
439
|
+
- Adding attachments
|
|
440
|
+
- Assigning issues
|
|
441
|
+
- Creating and updating issues
|
|
442
|
+
- Finding issues by assignee
|
|
443
|
+
- Counting issues
|
|
444
|
+
- Aggregating stored Jira data
|
|
445
|
+
|
|
446
|
+
Authentication modes:
|
|
447
|
+
- basic_auth: username + password
|
|
448
|
+
- token_auth: personal access token (preferred for Jira Cloud)
|
|
449
|
+
- oauth: OAuth1 parameters
|
|
450
|
+
|
|
451
|
+
Configuration precedence for init parameters:
|
|
452
|
+
1) Explicit kwargs to __init__
|
|
453
|
+
2) navconfig.config keys (if available)
|
|
454
|
+
3) Environment variables
|
|
455
|
+
|
|
456
|
+
Recognized config/env keys:
|
|
457
|
+
JIRA_SERVER_URL, JIRA_AUTH_TYPE, JIRA_USERNAME, JIRA_PASSWORD, JIRA_TOKEN,
|
|
458
|
+
JIRA_OAUTH_CONSUMER_KEY, JIRA_OAUTH_KEY_CERT, JIRA_OAUTH_ACCESS_TOKEN,
|
|
459
|
+
JIRA_OAUTH_ACCESS_TOKEN_SECRET, JIRA_DEFAULT_PROJECT
|
|
460
|
+
|
|
461
|
+
Field presets for efficiency:
|
|
462
|
+
count: key,assignee,reporter,status,priority,issuetype,project,created
|
|
463
|
+
list: key,summary,assignee,status,priority,issuetype,project,created,updated
|
|
464
|
+
analysis: key,summary,description,assignee,reporter,status,priority,issuetype,project,created,updated,resolutiondate,duedate,labels,components,timeoriginalestimate,timespent,customfield_10016
|
|
465
|
+
all: *all
|
|
466
|
+
|
|
467
|
+
Usage:
|
|
468
|
+
-----
|
|
469
|
+
# For counts - efficient, minimal context
|
|
470
|
+
jira.jira_count_issues(
|
|
471
|
+
jql="project = NAV AND status = Open",
|
|
472
|
+
group_by=["assignee", "status"]
|
|
473
|
+
)
|
|
474
|
+
|
|
475
|
+
# For analysis - store in DataFrame
|
|
476
|
+
jira.jira_search_issues(
|
|
477
|
+
jql="project = NAV",
|
|
478
|
+
max_results=1000,
|
|
479
|
+
fields="key,assignee,status,created", # Only what you need!
|
|
480
|
+
store_as_dataframe=True,
|
|
481
|
+
summary_only=True # Just counts in response
|
|
482
|
+
)
|
|
483
|
+
|
|
484
|
+
"""
|
|
485
|
+
|
|
486
|
+
# Expose the default input schema as metadata (optional)
|
|
487
|
+
input_class = JiraInput
|
|
488
|
+
_tool_manager: Optional[ToolManager] = None
|
|
489
|
+
|
|
490
|
+
def __init__(
    self,
    server_url: Optional[str] = None,
    auth_type: Optional[str] = None,
    username: Optional[str] = None,
    password: Optional[str] = None,
    token: Optional[str] = None,
    oauth_consumer_key: Optional[str] = None,
    oauth_key_cert: Optional[str] = None,
    oauth_access_token: Optional[str] = None,
    oauth_access_token_secret: Optional[str] = None,
    default_project: Optional[str] = None,
    **kwargs,
):
    """Initialize the toolkit and build the underlying Jira client.

    Every connection setting falls back from the explicit argument to
    navconfig (when available) and finally to environment variables.
    Raises ValueError when no server URL can be resolved.
    """
    super().__init__(**kwargs)

    def _setting(key: str, fallback: Optional[str] = None) -> Optional[str]:
        # navconfig takes precedence over the process environment.
        if nav_config is not None and hasattr(nav_config, "get"):
            value = nav_config.get(key)
            if value is not None:
                return str(value)
        return os.getenv(key, fallback)

    self.server_url = server_url or _setting("JIRA_INSTANCE") or ""
    if not self.server_url:
        raise ValueError(
            "Jira server_url is required (e.g., https://your.atlassian.net)"
        )

    self.logger = logging.getLogger(__name__)
    self.auth_type = (auth_type or _setting("JIRA_AUTH_TYPE", "token_auth")).lower()
    self.username = username or _setting("JIRA_USERNAME")
    # NOTE(review): password falls back to JIRA_API_TOKEN and token to
    # JIRA_SECRET_TOKEN -- confirm these key names against deployment config,
    # since the class docstring advertises JIRA_PASSWORD / JIRA_TOKEN.
    self.password = password or _setting("JIRA_PASSWORD") or _setting("JIRA_API_TOKEN")
    self.token = token or _setting("JIRA_SECRET_TOKEN")

    # OAuth1 parameters (only used when auth_type == "oauth").
    self.oauth_consumer_key = oauth_consumer_key or _setting("JIRA_OAUTH_CONSUMER_KEY")
    self.oauth_key_cert = oauth_key_cert or _setting("JIRA_OAUTH_KEY_CERT")
    self.oauth_access_token = oauth_access_token or _setting("JIRA_OAUTH_ACCESS_TOKEN")
    self.oauth_access_token_secret = oauth_access_token_secret or _setting("JIRA_OAUTH_ACCESS_TOKEN_SECRET")

    self.default_project = default_project or _setting("JIRA_DEFAULT_PROJECT")

    # Build the pycontribs JIRA client eagerly; raises on bad auth config.
    self.jira = self._init_jira_client()
|
|
535
|
+
|
|
536
|
+
# -----------------------------
|
|
537
|
+
# Client init helpers
|
|
538
|
+
# -----------------------------
|
|
539
|
+
def _init_jira_client(self) -> JIRA:
    """Instantiate the pycontribs JIRA client according to auth_type."""
    # SECURITY NOTE(review): TLS certificate verification is disabled here
    # ("verify": False). Preserved as-is, but this should be revisited.
    client_options: Dict[str, Any] = {
        "server": self.server_url,
        "verify": False,
        "headers": {"Accept-Encoding": "gzip, deflate"},
    }

    mode = self.auth_type
    if mode == "basic_auth":
        if not (self.username and self.password):
            raise ValueError("basic_auth requires username and password")
        return JIRA(options=client_options, basic_auth=(self.username, self.password))

    if mode == "token_auth":
        if not self.token:
            # Some setups use username+token via basic; keep token_auth strict here
            raise ValueError("token_auth requires a Personal Access Token")
        return JIRA(options=client_options, token_auth=self.token)

    if mode == "oauth":
        # oauth_key_cert can be the PEM content or a file path to PEM
        oauth_params = {
            "access_token": self.oauth_access_token,
            "access_token_secret": self.oauth_access_token_secret,
            "consumer_key": self.oauth_consumer_key,
            "key_cert": self._read_key_cert(self.oauth_key_cert),
        }
        # Every OAuth1 parameter is mandatory.
        if not all(oauth_params.values()):
            raise ValueError("oauth requires consumer_key, key_cert, access_token, access_token_secret")
        return JIRA(options=client_options, oauth=oauth_params)

    raise ValueError(f"Unsupported auth_type: {self.auth_type}")
|
|
578
|
+
|
|
579
|
+
@staticmethod
|
|
580
|
+
def _read_key_cert(value: Optional[str]) -> Optional[str]:
|
|
581
|
+
if not value:
|
|
582
|
+
return None
|
|
583
|
+
# If looks like a path and exists, read it; else assume it's PEM content
|
|
584
|
+
if os.path.exists(value):
|
|
585
|
+
with open(value, "r", encoding="utf-8") as f:
|
|
586
|
+
return f.read()
|
|
587
|
+
return value
|
|
588
|
+
|
|
589
|
+
def set_tool_manager(self, manager: ToolManager):
    """Set the ToolManager reference for DataFrame sharing."""
    # Stored so search results can later be published as shared DataFrames.
    self._tool_manager = manager
|
|
592
|
+
|
|
593
|
+
# -----------------------------
|
|
594
|
+
# Utility
|
|
595
|
+
# -----------------------------
|
|
596
|
+
def _issue_to_dict(self, issue_obj: Any) -> Dict[str, Any]:
|
|
597
|
+
# pycontribs Issue objects have a .raw (dict) and .key
|
|
598
|
+
try:
|
|
599
|
+
raw = getattr(issue_obj, "raw", None)
|
|
600
|
+
if isinstance(raw, dict):
|
|
601
|
+
return raw
|
|
602
|
+
# Fallback minimal structure
|
|
603
|
+
return {"id": getattr(issue_obj, "id", None), "key": getattr(issue_obj, "key", None)}
|
|
604
|
+
except Exception:
|
|
605
|
+
return {"id": getattr(issue_obj, "id", None), "key": getattr(issue_obj, "key", None)}
|
|
606
|
+
|
|
607
|
+
# ---- structured output helpers ----
|
|
608
|
+
def _import_string(self, path: str):
|
|
609
|
+
"""Import a dotted module path and return the attribute/class designated by the last name in the path."""
|
|
610
|
+
module_path, _, attr = path.rpartition(".")
|
|
611
|
+
if not module_path:
|
|
612
|
+
raise ValueError(f"Invalid model_path '{path}', expected 'package.module:Class' style")
|
|
613
|
+
module = importlib.import_module(module_path)
|
|
614
|
+
return getattr(module, attr)
|
|
615
|
+
|
|
616
|
+
def _get_by_path(self, data: Dict[str, Any], path: str, strict: bool = False) -> Any:
|
|
617
|
+
"""Get a value from a nested dict by dot-separated path. If strict and path not found, raises KeyError."""
|
|
618
|
+
cur: Any = data
|
|
619
|
+
for part in path.split('.'):
|
|
620
|
+
if isinstance(cur, dict) and part in cur:
|
|
621
|
+
cur = cur[part]
|
|
622
|
+
elif strict:
|
|
623
|
+
raise KeyError(f"Path '{path}' not found at '{part}'")
|
|
624
|
+
else:
|
|
625
|
+
return None
|
|
626
|
+
return cur
|
|
627
|
+
|
|
628
|
+
|
|
629
|
+
def _quote_jql_value(self, value: Union[str, int, float]) -> str:
|
|
630
|
+
"""Quote a JQL value, escaping special characters.
|
|
631
|
+
|
|
632
|
+
Jira's JQL treats characters like '@' as reserved when unquoted. This helper wraps
|
|
633
|
+
values in double quotes and escapes backslashes, double quotes, and newlines so that
|
|
634
|
+
user-provided identifiers (e.g., emails) are always valid JQL literals.
|
|
635
|
+
"""
|
|
636
|
+
|
|
637
|
+
text = str(value)
|
|
638
|
+
escaped = (
|
|
639
|
+
text.replace("\\", "\\\\")
|
|
640
|
+
.replace("\"", "\\\"")
|
|
641
|
+
.replace("\n", "\\n")
|
|
642
|
+
.replace("\r", "\\r")
|
|
643
|
+
)
|
|
644
|
+
return f'"{escaped}"'
|
|
645
|
+
|
|
646
|
+
|
|
647
|
+
def _build_assignee_jql(
|
|
648
|
+
self, assignee: str, project: Optional[str] = None, default_project: Optional[str] = None
|
|
649
|
+
) -> str:
|
|
650
|
+
"""Construct a JQL query for an assignee, quoting values as needed."""
|
|
651
|
+
|
|
652
|
+
jql = f"assignee={self._quote_jql_value(assignee)}"
|
|
653
|
+
if project or default_project:
|
|
654
|
+
proj = project or default_project
|
|
655
|
+
jql = f"project={proj} AND ({jql})"
|
|
656
|
+
return jql
|
|
657
|
+
|
|
658
|
+
def _project_include(self, data: Dict[str, Any], include: List[str], strict: bool = False) -> Dict[str, Any]:
|
|
659
|
+
"""Return a dict including only the specified dot-paths, preserving nested structure."""
|
|
660
|
+
out: Dict[str, Any] = {}
|
|
661
|
+
for path in include:
|
|
662
|
+
val = self._get_by_path(data, path, strict=strict)
|
|
663
|
+
# Build nested structure mirroring the path
|
|
664
|
+
cursor = out
|
|
665
|
+
parts = path.split('.')
|
|
666
|
+
for i, p in enumerate(parts):
|
|
667
|
+
if i == len(parts) - 1:
|
|
668
|
+
cursor[p] = val
|
|
669
|
+
else:
|
|
670
|
+
cursor = cursor.setdefault(p, {})
|
|
671
|
+
return out
|
|
672
|
+
|
|
673
|
+
def _project_mapping(self, data: Dict[str, Any], mapping: Dict[str, str], strict: bool = False) -> Dict[str, Any]:
|
|
674
|
+
"""Return a dict with keys renamed/flattened according to mapping {dest_key: dot_path}."""
|
|
675
|
+
return {dest: self._get_by_path(data, src, strict=strict) for dest, src in mapping.items()}
|
|
676
|
+
|
|
677
|
+
|
|
678
|
+
def _apply_structured_output(self, raw: Dict[str, Any], opts: Optional[StructuredOutputOptions]) -> Dict[str, Any]:
    """Apply include/mapping/model to raw dict according to opts, returning the transformed dict."""
    if not opts:
        return raw
    # `mapping` wins over `include` when both are supplied.
    if opts.mapping:
        payload = self._project_mapping(raw, opts.mapping, strict=opts.strict)
    elif opts.include:
        payload = self._project_include(raw, opts.include, strict=opts.strict)
    else:
        payload = raw
    if not opts.model_path:
        return payload
    model_cls = self._import_string(opts.model_path)
    try:
        # pydantic v2
        return model_cls.model_validate(payload).model_dump()  # type: ignore[attr-defined]
    except AttributeError:
        # pydantic v1 fallback
        return model_cls.parse_obj(payload).dict()
|
|
698
|
+
|
|
699
|
+
def _ensure_structured(
    self,
    opts: Optional[Union[StructuredOutputOptions, Dict[str, Any]]]
) -> Optional[StructuredOutputOptions]:
    """Ensure opts is a StructuredOutputOptions instance if provided as a dict."""
    if opts is None:
        return None
    if isinstance(opts, StructuredOutputOptions):
        return opts
    if not isinstance(opts, dict):
        raise ValueError("structured must be a StructuredOutputOptions instance or a dict")
    try:
        return StructuredOutputOptions(**opts)
    except AttributeError:
        # presumably a pydantic-version fallback -- TODO(review): confirm
        # this branch is reachable; constructors normally raise ValidationError.
        return StructuredOutputOptions.model_validate(opts)
|
|
714
|
+
|
|
715
|
+
# -----------------------------
|
|
716
|
+
# Tools (public async methods)
|
|
717
|
+
# -----------------------------
|
|
718
|
+
@tool_schema(GetIssueInput)
async def jira_get_issue(
    self,
    issue: str,
    fields: Optional[str] = None,
    expand: Optional[str] = None,
    structured: Optional[StructuredOutputOptions] = None,
) -> Union[Dict[str, Any], Any]:
    """Get a Jira issue by key or id.

    Example: issue = jira.issue('JRA-1330')

    If `structured` is provided, the output will be transformed according to the options.
    """
    # The pycontribs client is synchronous; run it off the event loop.
    issue_obj = await asyncio.to_thread(
        lambda: self.jira.issue(issue, fields=fields, expand=expand)
    )
    raw = self._issue_to_dict(issue_obj)
    opts = self._ensure_structured(structured)
    if opts:
        return self._apply_structured_output(raw, opts)
    return raw
|
|
740
|
+
|
|
741
|
+
@tool_schema(TransitionIssueInput)
async def jira_transition_issue(
    self,
    issue: str,
    transition: Union[str, int],
    fields: Optional[Dict[str, Any]] = None,
    assignee: Optional[Dict[str, Any]] = None,
    resolution: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Transition a Jira issue.

    Automatically sets 8h original estimate for issues without one
    when transitioning to 'To Do', 'TODO', or 'In Progress'.

    Example:
        jira.transition_issue(issue, '5', assignee={'name': 'pm_user'}, resolution={'id': '3'})
    """
    # Statuses that require an estimate
    ESTIMATE_REQUIRED_TRANSITIONS = {'to do', 'todo', 'in progress', 'in-progress'}
    DEFAULT_ESTIMATE = "8h"

    # Only transitions referenced *by name* can match this set; numeric ids skip it.
    if str(transition).lower().strip() in ESTIMATE_REQUIRED_TRANSITIONS:
        current_issue = await self.jira_get_issue(issue)
        raw = current_issue.get("raw", current_issue)
        timetracking = raw.get("fields", {}).get("timetracking", {}) if isinstance(raw, dict) else {}
        original_estimate = timetracking.get("originalEstimate") if timetracking else None
        if not original_estimate:
            # Set default 8h estimate before transitioning
            self.logger.info(f"Setting default {DEFAULT_ESTIMATE} estimate for {issue} before transition")
            await self.jira_update_issue(issue=issue, original_estimate=DEFAULT_ESTIMATE)

    # Build kwargs as accepted by pycontribs
    extra: Dict[str, Any] = {}
    for name, value in (("fields", fields), ("assignee", assignee), ("resolution", resolution)):
        if value:
            extra[name] = value

    # Transition may be id or name; let Jira client resolve
    await asyncio.to_thread(lambda: self.jira.transition_issue(issue, transition, **extra))
    # Return the latest state of the issue
    return await self.jira_get_issue(issue)
|
|
797
|
+
|
|
798
|
+
@tool_schema(AddAttachmentInput)
async def jira_add_attachment(self, issue: str, attachment: str) -> Dict[str, Any]:
    """Add an attachment to an issue.

    Example: jira.add_attachment(issue=issue, attachment='/path/to/file.txt')
    """
    # Upload happens off the event loop; the returned attachment object is discarded.
    await asyncio.to_thread(
        lambda: self.jira.add_attachment(issue=issue, attachment=attachment)
    )
    return {"ok": True, "issue": issue, "attachment": attachment}
|
|
809
|
+
|
|
810
|
+
@tool_schema(AssignIssueInput)
async def jira_assign_issue(self, issue: str, assignee: str) -> Dict[str, Any]:
    """Assign an issue to a user.

    Example: jira.assign_issue(issue, 'newassignee')
    """
    # Fire the synchronous client call in a worker thread.
    await asyncio.to_thread(lambda: self.jira.assign_issue(issue, assignee))
    return {"ok": True, "issue": issue, "assignee": assignee}
|
|
821
|
+
|
|
822
|
+
@tool_schema(CreateIssueInput)
async def jira_create_issue(
    self,
    project: str,
    summary: str,
    issuetype: str = "Task",
    description: Optional[str] = None,
    assignee: Optional[str] = None,
    priority: Optional[str] = None,
    labels: Optional[List[str]] = None,
    due_date: Optional[str] = None,
    parent: Optional[str] = None,
    original_estimate: Optional[str] = None,
    fields: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Create a new issue.

    Examples:
        # Create a bug with estimate
        jira_create_issue(
            project='NAV',
            summary='Login button not working',
            issuetype='Bug',
            description='Users cannot click the login button',
            priority='High',
            original_estimate='4h'
        )

        # Create a story
        jira_create_issue(
            project='NAV',
            summary='Add user profile page',
            issuetype='Story',
            labels=['frontend', 'user-experience'],
            original_estimate='2d'
        )

        # Create a sub-task
        jira_create_issue(
            project='NAV',
            summary='Design mockup',
            issuetype='Sub-task',
            parent='NAV-123'
        )
    """
    # Mandatory identification first.
    payload: Dict[str, Any] = {
        "project": {"key": project},
        "summary": summary,
        "issuetype": {"name": issuetype},
    }
    if description:
        payload["description"] = description
    if assignee:
        # assumes Jira Cloud-style account ids -- TODO(review): Server installs
        # use {'name': ...} instead; confirm target deployment.
        payload["assignee"] = {"accountId": assignee}
    if priority:
        payload["priority"] = {"name": priority}
    if labels:
        payload["labels"] = labels
    if due_date:
        payload["duedate"] = due_date
    if parent:
        payload["parent"] = {"key": parent}
    if original_estimate:
        payload["timetracking"] = {"originalEstimate": original_estimate}

    # Merge with additional fields if provided (caller-supplied values win).
    if fields:
        payload.update(fields)

    created = await asyncio.to_thread(lambda: self.jira.create_issue(fields=payload))
    data = self._issue_to_dict(created)
    return {"ok": True, "id": data.get("id"), "key": data.get("key"), "issue": data}
|
|
899
|
+
|
|
900
|
+
@tool_schema(UpdateIssueInput)
async def jira_update_issue(
    self,
    issue: str,
    summary: Optional[str] = None,
    description: Optional[str] = None,
    assignee: Optional[Dict[str, Any]] = None,
    acceptance_criteria: Optional[str] = None,
    original_estimate: Optional[str] = None,
    time_tracking: Optional[Dict[str, str]] = None,
    affected_versions: Optional[List[Dict[str, str]]] = None,
    due_date: Optional[str] = None,
    labels: Optional[List[str]] = None,
    issuetype: Optional[Dict[str, str]] = None,
    priority: Optional[Dict[str, str]] = None,
    fields: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Update an existing issue.

    Examples:
        # Update summary and description
        jira_update_issue(issue='NAV-123', summary='New title', description='Updated desc')

        # Update assignee
        jira_update_issue(issue='NAV-123', assignee={'accountId': 'abc123'})

        # Update due date and labels
        jira_update_issue(issue='NAV-123', due_date='2025-01-15', labels=['backend', 'urgent'])

        # Update time tracking
        jira_update_issue(issue='NAV-123', time_tracking={'originalEstimate': '8h', 'remainingEstimate': '4h'})

        # Change issue type
        jira_update_issue(issue='NAV-123', issuetype={'name': 'Bug'})
    """
    patch: Dict[str, Any] = {}

    # Standard fields: only explicitly supplied values are patched.
    for field_name, value in (
        ("summary", summary),
        ("description", description),
        ("assignee", assignee),
        ("duedate", due_date),
        ("labels", labels),
        ("issuetype", issuetype),
        ("priority", priority),
        ("versions", affected_versions),
    ):
        if value is not None:
            patch[field_name] = value

    # Time tracking (special field); explicit dict beats the shorthand.
    if time_tracking is not None:
        patch["timetracking"] = time_tracking
    elif original_estimate is not None:
        patch["timetracking"] = {"originalEstimate": original_estimate}

    # Acceptance criteria (often a custom field - common ones are customfield_10021 or customfield_10022)
    # This is instance-specific -- TODO(review): confirm the custom-field id on the target instance.
    if acceptance_criteria is not None:
        patch["customfield_10021"] = acceptance_criteria

    # Merge with arbitrary fields if provided (caller-supplied values win).
    if fields:
        patch.update(fields)

    update_kwargs: Dict[str, Any] = {"fields": patch} if patch else {}

    def _run():
        # jira.issue returns Issue; then we call .update on it
        obj = self.jira.issue(issue)
        obj.update(**update_kwargs)
        return obj

    return self._issue_to_dict(await asyncio.to_thread(_run))
|
|
983
|
+
|
|
984
|
+
@tool_schema(FindIssuesByAssigneeInput)
async def jira_find_issues_by_assignee(
    self, assignee: str, project: Optional[str] = None, max_results: int = 50
) -> Dict[str, Any]:
    """Locate issues owned by a specific user.

    Convenience wrapper: builds an assignee JQL clause (falling back to
    the toolkit's default project when none is given) and delegates the
    actual search to ``jira_search_issues``.

    Args:
        assignee: User to match (name, account id, or email as supported).
        project: Optional project key to scope the search.
        max_results: Maximum number of issues to return.

    Example: jira.search_issues("assignee=admin")
    """
    assignee_jql = self._build_assignee_jql(assignee, project, self.default_project)
    return await self.jira_search_issues(jql=assignee_jql, max_results=max_results)
|
|
995
|
+
|
|
996
|
+
@tool_schema(GetTransitionsInput)
async def jira_get_transitions(
    self,
    issue: str,
    expand: Optional[str] = None
) -> List[Dict[str, Any]]:
    """Return the workflow transitions currently available for an issue.

    Args:
        issue: Issue key or id.
        expand: Optional Jira ``expand`` specifier passed through to the API.

    Example: jira.jira_get_transitions('JRA-1330')
    """
    # Run the blocking client call off the event loop; the client
    # typically returns a list of transition dicts already.
    return await asyncio.to_thread(self.jira.transitions, issue, expand=expand)
|
|
1012
|
+
|
|
1013
|
+
@tool_schema(AddCommentInput)
async def jira_add_comment(
    self,
    issue: str,
    body: str,
    is_internal: bool = False
) -> Dict[str, Any]:
    """Add a comment to an issue.

    Args:
        issue: Issue key or id.
        body: Comment text.
        is_internal: Mark the comment as internal (Jira Service Management).
            Previously this flag was accepted but silently ignored; it is
            now forwarded to the client.

    Returns:
        The created comment serialized to a dict.

    Example: jira.jira_add_comment('JRA-1330', 'This is a comment')
    """
    def _run():
        # Forward is_internal so service-desk internal comments work.
        return self.jira.add_comment(issue, body, is_internal=is_internal)

    comment = await asyncio.to_thread(_run)
    # Use helper to extract raw dict if available
    return self._issue_to_dict(comment)
|
|
1030
|
+
|
|
1031
|
+
@tool_schema(AddWorklogInput)
async def jira_add_worklog(
    self,
    issue: str,
    time_spent: str,
    comment: Optional[str] = None,
    started: Optional[str] = None
) -> Dict[str, Any]:
    """Log work against an issue.

    Args:
        issue: Issue key or id.
        time_spent: Jira duration string (e.g. '1h 30m').
        comment: Optional worklog comment.
        started: Optional start timestamp for the worklog.

    Example: jira.jira_add_worklog('JRA-1330', '1h 30m', 'Working on feature')
    """
    # Offload the blocking client call to a worker thread.
    worklog = await asyncio.to_thread(
        self.jira.add_worklog,
        issue=issue,
        timeSpent=time_spent,
        comment=comment,
        started=started
    )

    serialized = self._issue_to_dict(worklog)
    if serialized and serialized.get('id'):
        return serialized

    # Raw dict unavailable — fall back to a minimal, still-useful payload.
    return {
        "id": getattr(worklog, "id", None),
        "issue": issue,
        "timeSpent": time_spent,
        "created": getattr(worklog, "created", None)
    }
|
|
1063
|
+
|
|
1064
|
+
@tool_schema(GetIssueTypesInput)
async def jira_get_issue_types(self, project: Optional[str] = None) -> List[Dict[str, Any]]:
    """List issue types, either globally or scoped to one project.

    Args:
        project: Optional project key; when given, only that project's
            issue types are returned.

    Example: jira.jira_get_issue_types(project='PROJ')
    """
    def _fetch():
        if not project:
            return self.jira.issue_types()
        return self.jira.project(project).issueTypes

    issue_types = await asyncio.to_thread(_fetch)
    # Serialize IssueType resources to plain dicts.
    return [
        {"id": itype.id, "name": itype.name, "description": getattr(itype, "description", "")}
        for itype in issue_types
    ]
|
|
1083
|
+
|
|
1084
|
+
@tool_schema(GetProjectsInput)
async def jira_get_projects(self) -> List[Dict[str, Any]]:
    """List every project the authenticated user can access.

    Example: jira.jira_get_projects()
    """
    # Blocking client call runs in a worker thread.
    projects = await asyncio.to_thread(self.jira.projects)
    return [{"id": proj.id, "key": proj.key, "name": proj.name} for proj in projects]
|
|
1095
|
+
|
|
1096
|
+
@tool_schema(SearchUsersInput)
async def jira_search_users(
    self,
    user: Optional[str] = None,
    start_at: int = 0,
    max_results: int = 50,
    include_active: bool = True,
    include_inactive: bool = False,
    query: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Search for users matching a search string.

    On Jira Cloud the "username" query parameter is deprecated and
    ``query`` should be used; ``user`` is retained for backwards
    compatibility with older servers.

    Example:
        jira.search_users(query='john.doe@example.com')
    """
    # Offload the blocking client call to a worker thread.
    matches = await asyncio.to_thread(
        self.jira.search_users,
        user=user,
        startAt=start_at,
        maxResults=max_results,
        includeActive=include_active,
        includeInactive=include_inactive,
        query=query
    )
    # Serialize each user resource to a plain dict.
    return [self._issue_to_dict(entry) for entry in matches]
|
|
1127
|
+
|
|
1128
|
+
|
|
1129
|
+
def _store_dataframe(
|
|
1130
|
+
self,
|
|
1131
|
+
name: str,
|
|
1132
|
+
df: pd.DataFrame,
|
|
1133
|
+
metadata: Dict[str, Any]
|
|
1134
|
+
) -> str:
|
|
1135
|
+
"""Store DataFrame in ToolManager's shared context."""
|
|
1136
|
+
if self._tool_manager is None:
|
|
1137
|
+
self.logger.warning(
|
|
1138
|
+
"No ToolManager set. DataFrame not shared. "
|
|
1139
|
+
"Call set_tool_manager() to enable sharing."
|
|
1140
|
+
)
|
|
1141
|
+
return name
|
|
1142
|
+
|
|
1143
|
+
try:
|
|
1144
|
+
self._tool_manager.share_dataframe(name, df, metadata)
|
|
1145
|
+
self.logger.info(f"DataFrame '{name}' stored: {len(df)} rows")
|
|
1146
|
+
return name
|
|
1147
|
+
except Exception as e:
|
|
1148
|
+
self.logger.error(f"Failed to store DataFrame: {e}")
|
|
1149
|
+
return name
|
|
1150
|
+
|
|
1151
|
+
def _json_issues_to_dataframe(self, issues: List[Dict[str, Any]]) -> pd.DataFrame:
|
|
1152
|
+
"""
|
|
1153
|
+
Convert JSON issues to a flattened DataFrame.
|
|
1154
|
+
|
|
1155
|
+
Works with json_result=True output format.
|
|
1156
|
+
"""
|
|
1157
|
+
if not issues:
|
|
1158
|
+
return pd.DataFrame()
|
|
1159
|
+
|
|
1160
|
+
rows = []
|
|
1161
|
+
for issue in issues:
|
|
1162
|
+
fields = issue.get('fields', {}) or {}
|
|
1163
|
+
|
|
1164
|
+
# Safe extraction helpers
|
|
1165
|
+
def get_nested(obj, *keys, default=None):
|
|
1166
|
+
for key in keys:
|
|
1167
|
+
if obj is None or not isinstance(obj, dict):
|
|
1168
|
+
return default
|
|
1169
|
+
obj = obj.get(key)
|
|
1170
|
+
return obj if obj is not None else default
|
|
1171
|
+
|
|
1172
|
+
row = {
|
|
1173
|
+
'key': issue.get('key'),
|
|
1174
|
+
'id': issue.get('id'),
|
|
1175
|
+
'self': issue.get('self'),
|
|
1176
|
+
|
|
1177
|
+
# Summary & Description
|
|
1178
|
+
'summary': fields.get('summary'),
|
|
1179
|
+
'description': (fields.get('description') or '')[:500] if fields.get('description') else None,
|
|
1180
|
+
|
|
1181
|
+
# People
|
|
1182
|
+
'assignee_id': get_nested(fields, 'assignee', 'accountId') or get_nested(fields, 'assignee', 'name'),
|
|
1183
|
+
'assignee_name': get_nested(fields, 'assignee', 'displayName'),
|
|
1184
|
+
'reporter_id': get_nested(fields, 'reporter', 'accountId') or get_nested(fields, 'reporter', 'name'),
|
|
1185
|
+
'reporter_name': get_nested(fields, 'reporter', 'displayName'),
|
|
1186
|
+
|
|
1187
|
+
# Status & Priority
|
|
1188
|
+
'status': get_nested(fields, 'status', 'name'),
|
|
1189
|
+
'status_category': get_nested(fields, 'status', 'statusCategory', 'name'),
|
|
1190
|
+
'priority': get_nested(fields, 'priority', 'name'),
|
|
1191
|
+
|
|
1192
|
+
# Type & Project
|
|
1193
|
+
'issuetype': get_nested(fields, 'issuetype', 'name'),
|
|
1194
|
+
'project_key': get_nested(fields, 'project', 'key'),
|
|
1195
|
+
'project_name': get_nested(fields, 'project', 'name'),
|
|
1196
|
+
|
|
1197
|
+
# Dates
|
|
1198
|
+
'created': fields.get('created'),
|
|
1199
|
+
'updated': fields.get('updated'),
|
|
1200
|
+
'resolved': fields.get('resolutiondate'),
|
|
1201
|
+
'due_date': fields.get('duedate'),
|
|
1202
|
+
|
|
1203
|
+
# Estimates (story points field ID varies by instance)
|
|
1204
|
+
'story_points': fields.get('customfield_10016'),
|
|
1205
|
+
'time_estimate': fields.get('timeoriginalestimate'),
|
|
1206
|
+
'time_spent': fields.get('timespent'),
|
|
1207
|
+
|
|
1208
|
+
# Collections
|
|
1209
|
+
'labels': ','.join(fields.get('labels', [])) if fields.get('labels') else None,
|
|
1210
|
+
'components': ','.join([c.get('name', '') for c in (fields.get('components') or [])]) if fields.get('components') else None,
|
|
1211
|
+
}
|
|
1212
|
+
rows.append(row)
|
|
1213
|
+
|
|
1214
|
+
df = pd.DataFrame(rows)
|
|
1215
|
+
|
|
1216
|
+
# Convert date columns
|
|
1217
|
+
for col in ['created', 'updated', 'resolved', 'due_date']:
|
|
1218
|
+
if col in df.columns:
|
|
1219
|
+
df[col] = pd.to_datetime(df[col], errors='coerce', utc=True)
|
|
1220
|
+
|
|
1221
|
+
# Add derived columns for easy grouping
|
|
1222
|
+
if 'created' in df.columns and df['created'].notna().any():
|
|
1223
|
+
df['created_month'] = df['created'].dt.to_period('M').astype(str)
|
|
1224
|
+
df['created_week'] = df['created'].dt.strftime('%Y-W%W')
|
|
1225
|
+
|
|
1226
|
+
return df
|
|
1227
|
+
|
|
1228
|
+
def _generate_summary(
|
|
1229
|
+
self,
|
|
1230
|
+
df: pd.DataFrame,
|
|
1231
|
+
jql: str,
|
|
1232
|
+
total: int,
|
|
1233
|
+
group_by: Optional[List[str]] = None
|
|
1234
|
+
) -> Dict[str, Any]:
|
|
1235
|
+
"""Generate summary statistics for LLM consumption."""
|
|
1236
|
+
summary = {
|
|
1237
|
+
"total_count": total,
|
|
1238
|
+
"fetched_count": len(df),
|
|
1239
|
+
"jql": jql,
|
|
1240
|
+
}
|
|
1241
|
+
|
|
1242
|
+
if df.empty:
|
|
1243
|
+
return summary
|
|
1244
|
+
|
|
1245
|
+
# Default groupings
|
|
1246
|
+
default_groups = ['assignee_name', 'status']
|
|
1247
|
+
groups_to_use = group_by or default_groups
|
|
1248
|
+
|
|
1249
|
+
# Generate counts for each field
|
|
1250
|
+
for field in groups_to_use:
|
|
1251
|
+
if field in df.columns:
|
|
1252
|
+
counts = df[field].value_counts(dropna=False).head(25).to_dict()
|
|
1253
|
+
# Replace NaN key with "Unassigned"
|
|
1254
|
+
if pd.isna(list(counts.keys())[0]) if counts else False:
|
|
1255
|
+
counts = {("Unassigned" if pd.isna(k) else k): v for k, v in counts.items()}
|
|
1256
|
+
summary[f"by_{field}"] = counts
|
|
1257
|
+
|
|
1258
|
+
# Date range if available
|
|
1259
|
+
if 'created' in df.columns and df['created'].notna().any():
|
|
1260
|
+
summary["date_range"] = {
|
|
1261
|
+
"oldest": df['created'].min().isoformat() if pd.notna(df['created'].min()) else None,
|
|
1262
|
+
"newest": df['created'].max().isoformat() if pd.notna(df['created'].max()) else None,
|
|
1263
|
+
}
|
|
1264
|
+
|
|
1265
|
+
return summary
|
|
1266
|
+
|
|
1267
|
+
def _resolve_fields(
|
|
1268
|
+
self,
|
|
1269
|
+
fields: Optional[str],
|
|
1270
|
+
for_counting: bool = False,
|
|
1271
|
+
group_by: Optional[List[str]] = None
|
|
1272
|
+
) -> Optional[str]:
|
|
1273
|
+
"""
|
|
1274
|
+
Resolve fields parameter to actual field string.
|
|
1275
|
+
|
|
1276
|
+
Args:
|
|
1277
|
+
fields: User input - preset name or field string
|
|
1278
|
+
for_counting: If True and fields is None, auto-select minimal
|
|
1279
|
+
group_by: If provided, select only fields needed for these groupings
|
|
1280
|
+
"""
|
|
1281
|
+
# If explicit fields provided, check for preset
|
|
1282
|
+
if fields:
|
|
1283
|
+
preset = FIELD_PRESETS.get(fields.lower())
|
|
1284
|
+
if preset:
|
|
1285
|
+
self.logger.debug(f"Using field preset '{fields}': {preset}")
|
|
1286
|
+
return preset
|
|
1287
|
+
return fields
|
|
1288
|
+
|
|
1289
|
+
# Auto-select for counting based on group_by
|
|
1290
|
+
if for_counting and group_by:
|
|
1291
|
+
field_map = {
|
|
1292
|
+
'assignee': 'assignee',
|
|
1293
|
+
'reporter': 'reporter',
|
|
1294
|
+
'status': 'status',
|
|
1295
|
+
'priority': 'priority',
|
|
1296
|
+
'issuetype': 'issuetype',
|
|
1297
|
+
'project': 'project',
|
|
1298
|
+
'created_month': 'created',
|
|
1299
|
+
}
|
|
1300
|
+
needed = {'key'}
|
|
1301
|
+
for g in group_by:
|
|
1302
|
+
if g in field_map:
|
|
1303
|
+
needed.add(field_map[g])
|
|
1304
|
+
return ','.join(sorted(needed))
|
|
1305
|
+
|
|
1306
|
+
# Default for counting without specific groups
|
|
1307
|
+
if for_counting:
|
|
1308
|
+
return FIELD_PRESETS["count"]
|
|
1309
|
+
|
|
1310
|
+
# No resolution needed
|
|
1311
|
+
return fields
|
|
1312
|
+
|
|
1313
|
+
@tool_schema(SearchIssuesInput)
async def jira_search_issues(
    self,
    jql: str,
    start_at: int = 0,
    max_results: Optional[int] = 100,
    fields: Optional[str] = None,
    expand: Optional[str] = None,
    json_result: bool = True,
    store_as_dataframe: bool = False,
    dataframe_name: Optional[str] = None,
    summary_only: bool = False,
    structured: Optional[StructuredOutputOptions] = None,
) -> Dict[str, Any]:
    """
    Search issues with JQL.

    For efficiency:
    - Use `fields` to request only needed data (e.g., 'key,assignee,status')
    - Use `max_results=None` to fetch all matching issues
    - Use `summary_only=True` for counts to avoid context bloat
    - Use `store_as_dataframe=True` for complex analysis with PythonPandasTool

    Examples:
    ---------
    # Simple search (default)
    jira_search_issues(jql="project = NAV AND status = Open")

    # Fetch all issues for counting
    jira_search_issues(
        jql="project = NAV AND status = Open",
        max_results=None, # Fetch all!
        fields="key,assignee,status",
        summary_only=True
    )

    # Full data for analysis
    jira_search_issues(
        jql="project = NAV",
        max_results=None,
        fields="key,summary,assignee,status,created,priority",
        store_as_dataframe=True,
        dataframe_name="nav_issues"
    )
    # Then use PythonPandasTool to analyze 'nav_issues' DataFrame
    """
    # NOTE(review): `json_result` is never referenced in this body, and
    # `start_at` is only echoed in the pagination metadata — token-based
    # pagination below always starts from the first page. Confirm whether
    # these parameters are kept solely for interface compatibility.

    self.logger.info(
        f"Executing JQL: {jql} with max results {max_results}"
    )

    # Use enhanced_search_issues for Jira Cloud (uses nextPageToken pagination)
    def _run_enhanced_search(page_token: Optional[str], current_max: int):
        # Blocking client call: one page of results, resumed via page_token.
        return self.jira.enhanced_search_issues(
            jql,
            maxResults=current_max,
            fields=fields.split(',') if fields else None,
            expand=expand,
            nextPageToken=page_token
        )

    all_issues = []          # accumulated serialized issues across pages
    fetched = 0              # running count, compared against max_results
    next_page_token: Optional[str] = None
    is_last = False

    # Pagination loop using nextPageToken
    # If max_results is None, fetch all (loop until isLast=True)
    while not is_last:
        # Calculate how many we still need
        # Use 100 per page if fetching all, otherwise remaining
        if max_results is None:
            page_size = 100 # Reasonable page size for full fetch
        else:
            remaining = max_results - fetched
            if remaining <= 0:
                break
            page_size = min(remaining, 100)

        # Using asyncio.to_thread for the blocking call
        result_list = await asyncio.to_thread(_run_enhanced_search, next_page_token, page_size)

        # enhanced_search_issues returns a ResultList object
        batch_issues = [self._issue_to_dict(i) for i in result_list]

        # Get pagination info from ResultList
        next_page_token = getattr(result_list, 'nextPageToken', None)
        is_last = getattr(result_list, 'isLast', True) # Default to True if missing

        if not batch_issues:
            break

        all_issues.extend(batch_issues)
        fetched += len(batch_issues)

        # If max_results is set and we've reached it, stop
        if max_results is not None and fetched >= max_results:
            break

        # If no more pages, stop
        if is_last or next_page_token is None:
            break

    issues = all_issues

    # Total is not returned by enhanced_search_issues, use fetched count
    # NOTE(review): because total == len(issues) here, the "has_more" flags
    # below are always False and the "notice" branch at the end can never
    # trigger — confirm whether a true server-side total is obtainable.
    total = len(issues)

    # Convert to DataFrame
    df = self._json_issues_to_dataframe(issues)

    # Store DataFrame if requested
    df_name = dataframe_name or "jira_issues"
    # Structured output short-circuits everything else (no DataFrame store,
    # no summary) and returns the transformed items directly.
    if structured:
        items = [self._apply_structured_output(it, structured) for it in issues]
        return {"total": total, "issues": items}

    if store_as_dataframe and not df.empty:
        self._store_dataframe(
            df_name,
            df,
            {
                "jql": jql,
                "total": total,
                "fetched_at": datetime.now().isoformat(),
                "fields_requested": fields,
            }
        )

    # Build response
    if summary_only:
        # Return summary with counts - minimal context usage
        result = self._generate_summary(df, jql, total)
        result["pagination"] = {
            "start_at": start_at,
            "max_results": max_results,
            "returned": len(issues),
            "total": total,
            "has_more": (start_at + len(issues)) < total,
        }
        if store_as_dataframe:
            result["dataframe_name"] = df_name
            result["dataframe_info"] = (
                f"Full data stored in DataFrame '{df_name}' with {len(df)} rows. "
                f"Use PythonPandasTool for custom aggregations."
            )
        return result

    else:
        # Return issues with metadata
        result = {
            "total": total,
            "issues": issues,
            "pagination": {
                "start_at": start_at,
                "max_results": max_results,
                "returned": len(issues),
                "total": total,
                "has_more": (start_at + len(issues)) < total,
            },
        }

        if store_as_dataframe:
            result["dataframe_name"] = df_name
            result["dataframe_info"] = f"Data also stored in DataFrame '{df_name}'"

        # Add notice if not all results returned
        if len(issues) < total:
            result["notice"] = (
                f"Showing {len(issues)} of {total} total issues. "
                f"Increase max_results (up to 1000) to get more, or "
                f"use summary_only=True for counts."
            )

        return result
|
|
1488
|
+
|
|
1489
|
+
@tool_schema(CountIssuesInput)
async def jira_count_issues(
    self,
    jql: str,
    group_by: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """
    Count issues with optional grouping - optimized for efficiency.

    Requests only the minimal fields needed for the chosen groupings,
    fetches ALL matching issues via jira_search_issues, then aggregates
    locally with pandas.

    Examples:
    ---------
    # Total count
    jira_count_issues(jql="project = NAV AND status = Open")
    # Returns: {"total_count": 847, "fetched_count": 847}

    # Count by assignee
    jira_count_issues(
        jql="project = NAV AND created >= '2025-01-01'",
        group_by=["assignee"]
    )
    # Returns: {"total_count": 234, "by_assignee": {"John": 45, "Jane": 32, ...}}

    # Count by multiple fields
    jira_count_issues(
        jql="project = NAV",
        group_by=["assignee", "status"]
    )
    """
    # Map user-facing grouping names to the Jira fields they require.
    field_mapping = {
        'assignee': 'assignee',
        'reporter': 'reporter',
        'status': 'status',
        'priority': 'priority',
        'issuetype': 'issuetype',
        'project': 'project',
        'created_month': 'created',
        'created_week': 'created',
    }

    # 'key' is always fetched; add whatever the groupings require.
    needed_fields = {'key'}
    if group_by:
        needed_fields.update(
            field_mapping[grouping] for grouping in group_by if grouping in field_mapping
        )
    else:
        # No explicit groupings: fetch the common ones.
        needed_fields.update(['assignee', 'status'])

    fields_str = ','.join(needed_fields)

    self.logger.info(f"Counting issues for JQL: {jql}")

    # Delegate pagination to jira_search_issues; max_results=None fetches
    # every matching issue so counts are accurate.
    search_result = await self.jira_search_issues(
        jql,
        max_results=None,
        fields=fields_str,
        json_result=True,
        store_as_dataframe=False
    )

    # search_result is a dict: {'total': int, 'issues': list, ...}
    total = search_result.get('total', 0)
    issues = search_result.get('issues', [])

    result = {
        "total_count": total,
        "fetched_count": len(issues),
        "jql": jql,
    }

    if total > len(issues):
        result["warning"] = (
            f"Only fetched {len(issues)} of {total} issues. "
            f"Counts below are based on fetched data only. "
            f"Increase max_results for complete counts."
        )

    if not issues:
        return result

    # Convert and aggregate locally.
    df = self._json_issues_to_dataframe(issues)

    # Map grouping names to the flattened DataFrame's column names.
    column_mapping = {
        'assignee': 'assignee_name',
        'reporter': 'reporter_name',
        'status': 'status',
        'priority': 'priority',
        'issuetype': 'issuetype',
        'project': 'project_key',
        'created_month': 'created_month',
        'created_week': 'created_week',
    }

    # Per-field value counts (missing values reported as "Unassigned").
    for group_field in (group_by or ['assignee', 'status']):
        col = column_mapping.get(group_field, group_field)
        if col not in df.columns:
            continue
        raw_counts = df[col].value_counts(dropna=False).to_dict()
        result[f"by_{group_field}"] = {
            ("Unassigned" if pd.isna(k) else k): v
            for k, v in raw_counts.items()
        }

    # Cross-tabulate when more than one grouping was requested.
    if group_by and len(group_by) > 1:
        cols = [
            column_mapping.get(g, g)
            for g in group_by
            if column_mapping.get(g, g) in df.columns
        ]
        if len(cols) > 1:
            try:
                pivot = df.groupby(cols, dropna=False).size().reset_index(name='count')
                # Records are easier to read than a pivot table dump.
                result["grouped"] = pivot.head(50).to_dict(orient='records')
            except Exception as e:
                self.logger.warning(f"Multi-group failed: {e}")

    return result
|
|
1616
|
+
|
|
1617
|
+
@tool_schema(AggregateJiraDataInput)
async def jira_aggregate_data(
    self,
    dataframe_name: str = "jira_issues",
    group_by: Optional[List[str]] = None,
    aggregations: Optional[Dict[str, str]] = None,
    sort_by: Optional[str] = None,
    ascending: bool = False,
) -> Dict[str, Any]:
    """
    Aggregate data from a stored Jira DataFrame.

    Use this after jira_search_issues(max_results=None,
    store_as_dataframe=True) to perform custom aggregations on the
    stored data.

    Args:
        dataframe_name: Name of the shared DataFrame to aggregate.
        group_by: Columns to group by (defaults to ["assignee_name"]).
        aggregations: Column -> aggregation function mapping
            (defaults to {"key": "count"}).
        sort_by: Optional result column to sort by.
        ascending: Sort direction when sort_by is given.

    Examples:
    ---------
    # Count by assignee
    jira_aggregate_data(
        dataframe_name="jira_issues",
        group_by=["assignee_name"],
        aggregations={"key": "count"}
    )

    # Sum story points by status
    jira_aggregate_data(
        dataframe_name="jira_issues",
        group_by=["status"],
        aggregations={"story_points": "sum", "key": "count"},
        sort_by="story_points"
    )
    """
    # Fix: suggestion strings previously referenced a nonexistent
    # `fetch_all` parameter of jira_search_issues; point callers at the
    # real options instead. Annotations fixed to Optional[...] to match
    # the None defaults.
    if self._tool_manager is None:
        return {
            "error": "ToolManager not set. Cannot access stored DataFrames.",
            "suggestion": "First fetch data with jira_search_issues(max_results=None, store_as_dataframe=True)"
        }

    try:
        df = self._tool_manager.get_shared_dataframe(dataframe_name)
    except KeyError:
        available = self._tool_manager.list_shared_dataframes()
        return {
            "error": f"DataFrame '{dataframe_name}' not found.",
            "available_dataframes": available,
            "suggestion": "First fetch data with jira_search_issues(max_results=None, store_as_dataframe=True, dataframe_name='...')"
        }

    if df.empty:
        return {"error": "DataFrame is empty", "row_count": 0}

    if not group_by:
        group_by = ["assignee_name"]

    if not aggregations:
        aggregations = {"key": "count"}

    try:
        # Perform aggregation
        agg_result = df.groupby(group_by, dropna=False).agg(aggregations).reset_index()

        # Flatten column names if MultiIndex
        if isinstance(agg_result.columns, pd.MultiIndex):
            agg_result.columns = ['_'.join(col).strip('_') for col in agg_result.columns]

        # Sort if requested
        if sort_by and sort_by in agg_result.columns:
            agg_result = agg_result.sort_values(sort_by, ascending=ascending)

        return {
            "success": True,
            "row_count": len(agg_result),
            "columns": list(agg_result.columns),
            "data": agg_result.to_dict(orient='records'),
        }
    except Exception as e:
        return {
            "error": f"Aggregation failed: {e}",
            "available_columns": list(df.columns),
            "suggestion": "Check that group_by columns exist in the DataFrame"
        }
|
|
1699
|
+
|
|
1700
|
+
# Public API. SearchUsersInput and AggregateJiraDataInput are used by the
# toolkit's @tool_schema decorators and were missing from this list.
__all__ = [
    "JiraToolkit",
    "JiraInput",
    "GetIssueInput",
    "SearchIssuesInput",
    "TransitionIssueInput",
    "AddAttachmentInput",
    "AssignIssueInput",
    "CreateIssueInput",
    "UpdateIssueInput",
    "FindIssuesByAssigneeInput",
    "GetTransitionsInput",
    "AddCommentInput",
    "AddWorklogInput",
    "GetIssueTypesInput",
    "GetProjectsInput",
    "CountIssuesInput",
    "SearchUsersInput",
    "AggregateJiraDataInput",
    "AggregateIssuesInput",  # NOTE(review): not referenced in this chunk — confirm it exists in this module
]
|