claude-autopm 2.8.2 → 2.8.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +399 -637
- package/install/install.js +15 -5
- package/package.json +2 -1
- package/packages/plugin-ai/LICENSE +21 -0
- package/packages/plugin-ai/README.md +316 -0
- package/packages/plugin-ai/agents/anthropic-claude-expert.md +579 -0
- package/packages/plugin-ai/agents/azure-openai-expert.md +1411 -0
- package/packages/plugin-ai/agents/gemini-api-expert.md +880 -0
- package/packages/plugin-ai/agents/google-a2a-expert.md +1445 -0
- package/packages/plugin-ai/agents/huggingface-expert.md +2131 -0
- package/packages/plugin-ai/agents/langchain-expert.md +1427 -0
- package/packages/plugin-ai/agents/langgraph-workflow-expert.md +520 -0
- package/packages/plugin-ai/agents/openai-python-expert.md +1087 -0
- package/packages/plugin-ai/commands/a2a-setup.md +886 -0
- package/packages/plugin-ai/commands/ai-model-deployment.md +481 -0
- package/packages/plugin-ai/commands/anthropic-optimize.md +793 -0
- package/packages/plugin-ai/commands/huggingface-deploy.md +789 -0
- package/packages/plugin-ai/commands/langchain-optimize.md +807 -0
- package/packages/plugin-ai/commands/llm-optimize.md +348 -0
- package/packages/plugin-ai/commands/openai-optimize.md +863 -0
- package/packages/plugin-ai/commands/rag-optimize.md +841 -0
- package/packages/plugin-ai/commands/rag-setup-scaffold.md +382 -0
- package/packages/plugin-ai/package.json +66 -0
- package/packages/plugin-ai/plugin.json +519 -0
- package/packages/plugin-ai/rules/ai-model-standards.md +449 -0
- package/packages/plugin-ai/rules/prompt-engineering-standards.md +509 -0
- package/packages/plugin-ai/scripts/examples/huggingface-inference-example.py +145 -0
- package/packages/plugin-ai/scripts/examples/langchain-rag-example.py +366 -0
- package/packages/plugin-ai/scripts/examples/mlflow-tracking-example.py +224 -0
- package/packages/plugin-ai/scripts/examples/openai-chat-example.py +425 -0
- package/packages/plugin-cloud/README.md +268 -0
- package/packages/plugin-cloud/agents/README.md +55 -0
- package/packages/plugin-cloud/agents/aws-cloud-architect.md +521 -0
- package/packages/plugin-cloud/agents/azure-cloud-architect.md +436 -0
- package/packages/plugin-cloud/agents/gcp-cloud-architect.md +385 -0
- package/packages/plugin-cloud/agents/gcp-cloud-functions-engineer.md +306 -0
- package/packages/plugin-cloud/agents/gemini-api-expert.md +880 -0
- package/packages/plugin-cloud/agents/kubernetes-orchestrator.md +566 -0
- package/packages/plugin-cloud/agents/openai-python-expert.md +1087 -0
- package/packages/plugin-cloud/agents/terraform-infrastructure-expert.md +454 -0
- package/packages/plugin-cloud/commands/cloud-cost-optimize.md +243 -0
- package/packages/plugin-cloud/commands/cloud-validate.md +196 -0
- package/packages/plugin-cloud/commands/infra-deploy.md +38 -0
- package/packages/plugin-cloud/commands/k8s-deploy.md +37 -0
- package/packages/plugin-cloud/commands/ssh-security.md +65 -0
- package/packages/plugin-cloud/commands/traefik-setup.md +65 -0
- package/packages/plugin-cloud/hooks/pre-cloud-deploy.js +456 -0
- package/packages/plugin-cloud/package.json +64 -0
- package/packages/plugin-cloud/plugin.json +338 -0
- package/packages/plugin-cloud/rules/cloud-security-compliance.md +313 -0
- package/packages/plugin-cloud/rules/infrastructure-pipeline.md +128 -0
- package/packages/plugin-cloud/scripts/examples/aws-validate.sh +30 -0
- package/packages/plugin-cloud/scripts/examples/azure-setup.sh +33 -0
- package/packages/plugin-cloud/scripts/examples/gcp-setup.sh +39 -0
- package/packages/plugin-cloud/scripts/examples/k8s-validate.sh +40 -0
- package/packages/plugin-cloud/scripts/examples/terraform-init.sh +26 -0
- package/packages/plugin-core/README.md +274 -0
- package/packages/plugin-core/agents/core/agent-manager.md +296 -0
- package/packages/plugin-core/agents/core/code-analyzer.md +131 -0
- package/packages/plugin-core/agents/core/file-analyzer.md +162 -0
- package/packages/plugin-core/agents/core/test-runner.md +200 -0
- package/packages/plugin-core/commands/code-rabbit.md +128 -0
- package/packages/plugin-core/commands/prompt.md +9 -0
- package/packages/plugin-core/commands/re-init.md +9 -0
- package/packages/plugin-core/hooks/context7-reminder.md +29 -0
- package/packages/plugin-core/hooks/enforce-agents.js +125 -0
- package/packages/plugin-core/hooks/enforce-agents.sh +35 -0
- package/packages/plugin-core/hooks/pre-agent-context7.js +224 -0
- package/packages/plugin-core/hooks/pre-command-context7.js +229 -0
- package/packages/plugin-core/hooks/strict-enforce-agents.sh +39 -0
- package/packages/plugin-core/hooks/test-hook.sh +21 -0
- package/packages/plugin-core/hooks/unified-context7-enforcement.sh +38 -0
- package/packages/plugin-core/package.json +45 -0
- package/packages/plugin-core/plugin.json +387 -0
- package/packages/plugin-core/rules/agent-coordination.md +549 -0
- package/packages/plugin-core/rules/agent-mandatory.md +170 -0
- package/packages/plugin-core/rules/ai-integration-patterns.md +219 -0
- package/packages/plugin-core/rules/command-pipelines.md +208 -0
- package/packages/plugin-core/rules/context-optimization.md +176 -0
- package/packages/plugin-core/rules/context7-enforcement.md +327 -0
- package/packages/plugin-core/rules/datetime.md +122 -0
- package/packages/plugin-core/rules/definition-of-done.md +272 -0
- package/packages/plugin-core/rules/development-environments.md +19 -0
- package/packages/plugin-core/rules/development-workflow.md +198 -0
- package/packages/plugin-core/rules/framework-path-rules.md +180 -0
- package/packages/plugin-core/rules/frontmatter-operations.md +64 -0
- package/packages/plugin-core/rules/git-strategy.md +237 -0
- package/packages/plugin-core/rules/golden-rules.md +181 -0
- package/packages/plugin-core/rules/naming-conventions.md +111 -0
- package/packages/plugin-core/rules/no-pr-workflow.md +183 -0
- package/packages/plugin-core/rules/performance-guidelines.md +403 -0
- package/packages/plugin-core/rules/pipeline-mandatory.md +109 -0
- package/packages/plugin-core/rules/security-checklist.md +318 -0
- package/packages/plugin-core/rules/standard-patterns.md +197 -0
- package/packages/plugin-core/rules/strip-frontmatter.md +85 -0
- package/packages/plugin-core/rules/tdd.enforcement.md +103 -0
- package/packages/plugin-core/rules/use-ast-grep.md +113 -0
- package/packages/plugin-core/scripts/lib/datetime-utils.sh +254 -0
- package/packages/plugin-core/scripts/lib/frontmatter-utils.sh +294 -0
- package/packages/plugin-core/scripts/lib/github-utils.sh +221 -0
- package/packages/plugin-core/scripts/lib/logging-utils.sh +199 -0
- package/packages/plugin-core/scripts/lib/validation-utils.sh +339 -0
- package/packages/plugin-core/scripts/mcp/add.sh +7 -0
- package/packages/plugin-core/scripts/mcp/disable.sh +12 -0
- package/packages/plugin-core/scripts/mcp/enable.sh +12 -0
- package/packages/plugin-core/scripts/mcp/list.sh +7 -0
- package/packages/plugin-core/scripts/mcp/sync.sh +8 -0
- package/packages/plugin-data/README.md +315 -0
- package/packages/plugin-data/agents/airflow-orchestration-expert.md +158 -0
- package/packages/plugin-data/agents/kedro-pipeline-expert.md +304 -0
- package/packages/plugin-data/agents/langgraph-workflow-expert.md +530 -0
- package/packages/plugin-data/commands/airflow-dag-scaffold.md +413 -0
- package/packages/plugin-data/commands/kafka-pipeline-scaffold.md +503 -0
- package/packages/plugin-data/package.json +66 -0
- package/packages/plugin-data/plugin.json +294 -0
- package/packages/plugin-data/rules/data-quality-standards.md +373 -0
- package/packages/plugin-data/rules/etl-pipeline-standards.md +255 -0
- package/packages/plugin-data/scripts/examples/airflow-dag-example.py +245 -0
- package/packages/plugin-data/scripts/examples/dbt-transform-example.sql +238 -0
- package/packages/plugin-data/scripts/examples/kafka-streaming-example.py +257 -0
- package/packages/plugin-data/scripts/examples/pandas-etl-example.py +332 -0
- package/packages/plugin-databases/README.md +330 -0
- package/packages/plugin-databases/agents/README.md +50 -0
- package/packages/plugin-databases/agents/bigquery-expert.md +401 -0
- package/packages/plugin-databases/agents/cosmosdb-expert.md +375 -0
- package/packages/plugin-databases/agents/mongodb-expert.md +407 -0
- package/packages/plugin-databases/agents/postgresql-expert.md +329 -0
- package/packages/plugin-databases/agents/redis-expert.md +74 -0
- package/packages/plugin-databases/commands/db-optimize.md +612 -0
- package/packages/plugin-databases/package.json +60 -0
- package/packages/plugin-databases/plugin.json +237 -0
- package/packages/plugin-databases/rules/database-management-strategy.md +146 -0
- package/packages/plugin-databases/rules/database-pipeline.md +316 -0
- package/packages/plugin-databases/scripts/examples/bigquery-cost-analyze.sh +160 -0
- package/packages/plugin-databases/scripts/examples/cosmosdb-ru-optimize.sh +163 -0
- package/packages/plugin-databases/scripts/examples/mongodb-shard-check.sh +120 -0
- package/packages/plugin-databases/scripts/examples/postgres-index-analyze.sh +95 -0
- package/packages/plugin-databases/scripts/examples/redis-cache-stats.sh +121 -0
- package/packages/plugin-devops/README.md +367 -0
- package/packages/plugin-devops/agents/README.md +52 -0
- package/packages/plugin-devops/agents/azure-devops-specialist.md +308 -0
- package/packages/plugin-devops/agents/docker-containerization-expert.md +298 -0
- package/packages/plugin-devops/agents/github-operations-specialist.md +335 -0
- package/packages/plugin-devops/agents/mcp-context-manager.md +319 -0
- package/packages/plugin-devops/agents/observability-engineer.md +574 -0
- package/packages/plugin-devops/agents/ssh-operations-expert.md +1093 -0
- package/packages/plugin-devops/agents/traefik-proxy-expert.md +444 -0
- package/packages/plugin-devops/commands/ci-pipeline-create.md +581 -0
- package/packages/plugin-devops/commands/docker-optimize.md +493 -0
- package/packages/plugin-devops/commands/workflow-create.md +42 -0
- package/packages/plugin-devops/hooks/pre-docker-build.js +472 -0
- package/packages/plugin-devops/package.json +61 -0
- package/packages/plugin-devops/plugin.json +302 -0
- package/packages/plugin-devops/rules/ci-cd-kubernetes-strategy.md +25 -0
- package/packages/plugin-devops/rules/devops-troubleshooting-playbook.md +450 -0
- package/packages/plugin-devops/rules/docker-first-development.md +404 -0
- package/packages/plugin-devops/rules/github-operations.md +92 -0
- package/packages/plugin-devops/scripts/examples/docker-build-multistage.sh +43 -0
- package/packages/plugin-devops/scripts/examples/docker-compose-validate.sh +74 -0
- package/packages/plugin-devops/scripts/examples/github-workflow-validate.sh +48 -0
- package/packages/plugin-devops/scripts/examples/prometheus-health-check.sh +58 -0
- package/packages/plugin-devops/scripts/examples/ssh-key-setup.sh +74 -0
- package/packages/plugin-frameworks/README.md +309 -0
- package/packages/plugin-frameworks/agents/README.md +64 -0
- package/packages/plugin-frameworks/agents/e2e-test-engineer.md +579 -0
- package/packages/plugin-frameworks/agents/nats-messaging-expert.md +254 -0
- package/packages/plugin-frameworks/agents/react-frontend-engineer.md +393 -0
- package/packages/plugin-frameworks/agents/react-ui-expert.md +226 -0
- package/packages/plugin-frameworks/agents/tailwindcss-expert.md +1021 -0
- package/packages/plugin-frameworks/agents/ux-design-expert.md +244 -0
- package/packages/plugin-frameworks/commands/app-scaffold.md +50 -0
- package/packages/plugin-frameworks/commands/nextjs-optimize.md +692 -0
- package/packages/plugin-frameworks/commands/react-optimize.md +583 -0
- package/packages/plugin-frameworks/commands/tailwind-system.md +64 -0
- package/packages/plugin-frameworks/package.json +59 -0
- package/packages/plugin-frameworks/plugin.json +224 -0
- package/packages/plugin-frameworks/rules/performance-guidelines.md +403 -0
- package/packages/plugin-frameworks/rules/ui-development-standards.md +281 -0
- package/packages/plugin-frameworks/rules/ui-framework-rules.md +151 -0
- package/packages/plugin-frameworks/scripts/examples/react-component-perf.sh +34 -0
- package/packages/plugin-frameworks/scripts/examples/tailwind-optimize.sh +44 -0
- package/packages/plugin-frameworks/scripts/examples/vue-composition-check.sh +41 -0
- package/packages/plugin-languages/README.md +333 -0
- package/packages/plugin-languages/agents/README.md +50 -0
- package/packages/plugin-languages/agents/bash-scripting-expert.md +541 -0
- package/packages/plugin-languages/agents/javascript-frontend-engineer.md +197 -0
- package/packages/plugin-languages/agents/nodejs-backend-engineer.md +226 -0
- package/packages/plugin-languages/agents/python-backend-engineer.md +214 -0
- package/packages/plugin-languages/agents/python-backend-expert.md +289 -0
- package/packages/plugin-languages/commands/javascript-optimize.md +636 -0
- package/packages/plugin-languages/commands/nodejs-api-scaffold.md +341 -0
- package/packages/plugin-languages/commands/nodejs-optimize.md +689 -0
- package/packages/plugin-languages/commands/python-api-scaffold.md +261 -0
- package/packages/plugin-languages/commands/python-optimize.md +593 -0
- package/packages/plugin-languages/package.json +65 -0
- package/packages/plugin-languages/plugin.json +265 -0
- package/packages/plugin-languages/rules/code-quality-standards.md +496 -0
- package/packages/plugin-languages/rules/testing-standards.md +768 -0
- package/packages/plugin-languages/scripts/examples/bash-production-script.sh +520 -0
- package/packages/plugin-languages/scripts/examples/javascript-es6-patterns.js +291 -0
- package/packages/plugin-languages/scripts/examples/nodejs-async-iteration.js +360 -0
- package/packages/plugin-languages/scripts/examples/python-async-patterns.py +289 -0
- package/packages/plugin-languages/scripts/examples/typescript-patterns.ts +432 -0
- package/packages/plugin-ml/README.md +430 -0
- package/packages/plugin-ml/agents/automl-expert.md +326 -0
- package/packages/plugin-ml/agents/computer-vision-expert.md +550 -0
- package/packages/plugin-ml/agents/gradient-boosting-expert.md +455 -0
- package/packages/plugin-ml/agents/neural-network-architect.md +1228 -0
- package/packages/plugin-ml/agents/nlp-transformer-expert.md +584 -0
- package/packages/plugin-ml/agents/pytorch-expert.md +412 -0
- package/packages/plugin-ml/agents/reinforcement-learning-expert.md +2088 -0
- package/packages/plugin-ml/agents/scikit-learn-expert.md +228 -0
- package/packages/plugin-ml/agents/tensorflow-keras-expert.md +509 -0
- package/packages/plugin-ml/agents/time-series-expert.md +303 -0
- package/packages/plugin-ml/commands/ml-automl.md +572 -0
- package/packages/plugin-ml/commands/ml-train-optimize.md +657 -0
- package/packages/plugin-ml/package.json +52 -0
- package/packages/plugin-ml/plugin.json +338 -0
- package/packages/plugin-pm/README.md +368 -0
- package/packages/plugin-pm/claudeautopm-plugin-pm-2.0.0.tgz +0 -0
- package/packages/plugin-pm/commands/azure/COMMANDS.md +107 -0
- package/packages/plugin-pm/commands/azure/COMMAND_MAPPING.md +252 -0
- package/packages/plugin-pm/commands/azure/INTEGRATION_FIX.md +103 -0
- package/packages/plugin-pm/commands/azure/README.md +246 -0
- package/packages/plugin-pm/commands/azure/active-work.md +198 -0
- package/packages/plugin-pm/commands/azure/aliases.md +143 -0
- package/packages/plugin-pm/commands/azure/blocked-items.md +287 -0
- package/packages/plugin-pm/commands/azure/clean.md +93 -0
- package/packages/plugin-pm/commands/azure/docs-query.md +48 -0
- package/packages/plugin-pm/commands/azure/feature-decompose.md +380 -0
- package/packages/plugin-pm/commands/azure/feature-list.md +61 -0
- package/packages/plugin-pm/commands/azure/feature-new.md +115 -0
- package/packages/plugin-pm/commands/azure/feature-show.md +205 -0
- package/packages/plugin-pm/commands/azure/feature-start.md +130 -0
- package/packages/plugin-pm/commands/azure/fix-integration-example.md +93 -0
- package/packages/plugin-pm/commands/azure/help.md +150 -0
- package/packages/plugin-pm/commands/azure/import-us.md +269 -0
- package/packages/plugin-pm/commands/azure/init.md +211 -0
- package/packages/plugin-pm/commands/azure/next-task.md +262 -0
- package/packages/plugin-pm/commands/azure/search.md +160 -0
- package/packages/plugin-pm/commands/azure/sprint-status.md +235 -0
- package/packages/plugin-pm/commands/azure/standup.md +260 -0
- package/packages/plugin-pm/commands/azure/sync-all.md +99 -0
- package/packages/plugin-pm/commands/azure/task-analyze.md +186 -0
- package/packages/plugin-pm/commands/azure/task-close.md +329 -0
- package/packages/plugin-pm/commands/azure/task-edit.md +145 -0
- package/packages/plugin-pm/commands/azure/task-list.md +263 -0
- package/packages/plugin-pm/commands/azure/task-new.md +84 -0
- package/packages/plugin-pm/commands/azure/task-reopen.md +79 -0
- package/packages/plugin-pm/commands/azure/task-show.md +126 -0
- package/packages/plugin-pm/commands/azure/task-start.md +301 -0
- package/packages/plugin-pm/commands/azure/task-status.md +65 -0
- package/packages/plugin-pm/commands/azure/task-sync.md +67 -0
- package/packages/plugin-pm/commands/azure/us-edit.md +164 -0
- package/packages/plugin-pm/commands/azure/us-list.md +202 -0
- package/packages/plugin-pm/commands/azure/us-new.md +265 -0
- package/packages/plugin-pm/commands/azure/us-parse.md +253 -0
- package/packages/plugin-pm/commands/azure/us-show.md +188 -0
- package/packages/plugin-pm/commands/azure/us-status.md +320 -0
- package/packages/plugin-pm/commands/azure/validate.md +86 -0
- package/packages/plugin-pm/commands/azure/work-item-sync.md +47 -0
- package/packages/plugin-pm/commands/blocked.md +28 -0
- package/packages/plugin-pm/commands/clean.md +119 -0
- package/packages/plugin-pm/commands/context-create.md +136 -0
- package/packages/plugin-pm/commands/context-prime.md +170 -0
- package/packages/plugin-pm/commands/context-update.md +292 -0
- package/packages/plugin-pm/commands/context.md +28 -0
- package/packages/plugin-pm/commands/epic-close.md +86 -0
- package/packages/plugin-pm/commands/epic-decompose.md +370 -0
- package/packages/plugin-pm/commands/epic-edit.md +83 -0
- package/packages/plugin-pm/commands/epic-list.md +30 -0
- package/packages/plugin-pm/commands/epic-merge.md +222 -0
- package/packages/plugin-pm/commands/epic-oneshot.md +119 -0
- package/packages/plugin-pm/commands/epic-refresh.md +119 -0
- package/packages/plugin-pm/commands/epic-show.md +28 -0
- package/packages/plugin-pm/commands/epic-split.md +120 -0
- package/packages/plugin-pm/commands/epic-start.md +195 -0
- package/packages/plugin-pm/commands/epic-status.md +28 -0
- package/packages/plugin-pm/commands/epic-sync-modular.md +338 -0
- package/packages/plugin-pm/commands/epic-sync-original.md +473 -0
- package/packages/plugin-pm/commands/epic-sync.md +486 -0
- package/packages/plugin-pm/commands/github/workflow-create.md +42 -0
- package/packages/plugin-pm/commands/help.md +28 -0
- package/packages/plugin-pm/commands/import.md +115 -0
- package/packages/plugin-pm/commands/in-progress.md +28 -0
- package/packages/plugin-pm/commands/init.md +28 -0
- package/packages/plugin-pm/commands/issue-analyze.md +202 -0
- package/packages/plugin-pm/commands/issue-close.md +119 -0
- package/packages/plugin-pm/commands/issue-edit.md +93 -0
- package/packages/plugin-pm/commands/issue-reopen.md +87 -0
- package/packages/plugin-pm/commands/issue-show.md +41 -0
- package/packages/plugin-pm/commands/issue-start.md +234 -0
- package/packages/plugin-pm/commands/issue-status.md +95 -0
- package/packages/plugin-pm/commands/issue-sync.md +411 -0
- package/packages/plugin-pm/commands/next.md +28 -0
- package/packages/plugin-pm/commands/prd-edit.md +82 -0
- package/packages/plugin-pm/commands/prd-list.md +28 -0
- package/packages/plugin-pm/commands/prd-new.md +55 -0
- package/packages/plugin-pm/commands/prd-parse.md +42 -0
- package/packages/plugin-pm/commands/prd-status.md +28 -0
- package/packages/plugin-pm/commands/search.md +28 -0
- package/packages/plugin-pm/commands/standup.md +28 -0
- package/packages/plugin-pm/commands/status.md +28 -0
- package/packages/plugin-pm/commands/sync.md +99 -0
- package/packages/plugin-pm/commands/test-reference-update.md +151 -0
- package/packages/plugin-pm/commands/validate.md +28 -0
- package/packages/plugin-pm/commands/what-next.md +28 -0
- package/packages/plugin-pm/package.json +57 -0
- package/packages/plugin-pm/plugin.json +503 -0
- package/packages/plugin-pm/scripts/pm/analytics.js +425 -0
- package/packages/plugin-pm/scripts/pm/blocked.js +164 -0
- package/packages/plugin-pm/scripts/pm/blocked.sh +78 -0
- package/packages/plugin-pm/scripts/pm/clean.js +464 -0
- package/packages/plugin-pm/scripts/pm/context-create.js +216 -0
- package/packages/plugin-pm/scripts/pm/context-prime.js +335 -0
- package/packages/plugin-pm/scripts/pm/context-update.js +344 -0
- package/packages/plugin-pm/scripts/pm/context.js +338 -0
- package/packages/plugin-pm/scripts/pm/epic-close.js +347 -0
- package/packages/plugin-pm/scripts/pm/epic-edit.js +382 -0
- package/packages/plugin-pm/scripts/pm/epic-list.js +273 -0
- package/packages/plugin-pm/scripts/pm/epic-list.sh +109 -0
- package/packages/plugin-pm/scripts/pm/epic-show.js +291 -0
- package/packages/plugin-pm/scripts/pm/epic-show.sh +105 -0
- package/packages/plugin-pm/scripts/pm/epic-split.js +522 -0
- package/packages/plugin-pm/scripts/pm/epic-start/epic-start.js +183 -0
- package/packages/plugin-pm/scripts/pm/epic-start/epic-start.sh +94 -0
- package/packages/plugin-pm/scripts/pm/epic-status.js +291 -0
- package/packages/plugin-pm/scripts/pm/epic-status.sh +104 -0
- package/packages/plugin-pm/scripts/pm/epic-sync/README.md +208 -0
- package/packages/plugin-pm/scripts/pm/epic-sync/create-epic-issue.sh +77 -0
- package/packages/plugin-pm/scripts/pm/epic-sync/create-task-issues.sh +86 -0
- package/packages/plugin-pm/scripts/pm/epic-sync/update-epic-file.sh +79 -0
- package/packages/plugin-pm/scripts/pm/epic-sync/update-references.sh +89 -0
- package/packages/plugin-pm/scripts/pm/epic-sync.sh +137 -0
- package/packages/plugin-pm/scripts/pm/help.js +92 -0
- package/packages/plugin-pm/scripts/pm/help.sh +90 -0
- package/packages/plugin-pm/scripts/pm/in-progress.js +178 -0
- package/packages/plugin-pm/scripts/pm/in-progress.sh +93 -0
- package/packages/plugin-pm/scripts/pm/init.js +321 -0
- package/packages/plugin-pm/scripts/pm/init.sh +178 -0
- package/packages/plugin-pm/scripts/pm/issue-close.js +232 -0
- package/packages/plugin-pm/scripts/pm/issue-edit.js +310 -0
- package/packages/plugin-pm/scripts/pm/issue-show.js +272 -0
- package/packages/plugin-pm/scripts/pm/issue-start.js +181 -0
- package/packages/plugin-pm/scripts/pm/issue-sync/format-comment.sh +468 -0
- package/packages/plugin-pm/scripts/pm/issue-sync/gather-updates.sh +460 -0
- package/packages/plugin-pm/scripts/pm/issue-sync/post-comment.sh +330 -0
- package/packages/plugin-pm/scripts/pm/issue-sync/preflight-validation.sh +348 -0
- package/packages/plugin-pm/scripts/pm/issue-sync/update-frontmatter.sh +387 -0
- package/packages/plugin-pm/scripts/pm/lib/README.md +85 -0
- package/packages/plugin-pm/scripts/pm/lib/epic-discovery.js +119 -0
- package/packages/plugin-pm/scripts/pm/lib/logger.js +78 -0
- package/packages/plugin-pm/scripts/pm/next.js +189 -0
- package/packages/plugin-pm/scripts/pm/next.sh +72 -0
- package/packages/plugin-pm/scripts/pm/optimize.js +407 -0
- package/packages/plugin-pm/scripts/pm/pr-create.js +337 -0
- package/packages/plugin-pm/scripts/pm/pr-list.js +257 -0
- package/packages/plugin-pm/scripts/pm/prd-list.js +242 -0
- package/packages/plugin-pm/scripts/pm/prd-list.sh +103 -0
- package/packages/plugin-pm/scripts/pm/prd-new.js +684 -0
- package/packages/plugin-pm/scripts/pm/prd-parse.js +547 -0
- package/packages/plugin-pm/scripts/pm/prd-status.js +152 -0
- package/packages/plugin-pm/scripts/pm/prd-status.sh +63 -0
- package/packages/plugin-pm/scripts/pm/release.js +460 -0
- package/packages/plugin-pm/scripts/pm/search.js +192 -0
- package/packages/plugin-pm/scripts/pm/search.sh +89 -0
- package/packages/plugin-pm/scripts/pm/standup.js +362 -0
- package/packages/plugin-pm/scripts/pm/standup.sh +95 -0
- package/packages/plugin-pm/scripts/pm/status.js +148 -0
- package/packages/plugin-pm/scripts/pm/status.sh +59 -0
- package/packages/plugin-pm/scripts/pm/sync-batch.js +337 -0
- package/packages/plugin-pm/scripts/pm/sync.js +343 -0
- package/packages/plugin-pm/scripts/pm/template-list.js +141 -0
- package/packages/plugin-pm/scripts/pm/template-new.js +366 -0
- package/packages/plugin-pm/scripts/pm/validate.js +274 -0
- package/packages/plugin-pm/scripts/pm/validate.sh +106 -0
- package/packages/plugin-pm/scripts/pm/what-next.js +660 -0
- package/packages/plugin-testing/README.md +401 -0
- package/packages/plugin-testing/agents/frontend-testing-engineer.md +768 -0
- package/packages/plugin-testing/commands/jest-optimize.md +800 -0
- package/packages/plugin-testing/commands/playwright-optimize.md +887 -0
- package/packages/plugin-testing/commands/test-coverage.md +512 -0
- package/packages/plugin-testing/commands/test-performance.md +1041 -0
- package/packages/plugin-testing/commands/test-setup.md +414 -0
- package/packages/plugin-testing/package.json +40 -0
- package/packages/plugin-testing/plugin.json +197 -0
- package/packages/plugin-testing/rules/test-coverage-requirements.md +581 -0
- package/packages/plugin-testing/rules/testing-standards.md +529 -0
- package/packages/plugin-testing/scripts/examples/react-testing-example.test.jsx +460 -0
- package/packages/plugin-testing/scripts/examples/vitest-config-example.js +352 -0
- package/packages/plugin-testing/scripts/examples/vue-testing-example.test.js +586 -0
|
@@ -0,0 +1,1087 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: openai-python-expert
|
|
3
|
+
description: Use this agent for OpenAI Python SDK integration including GPT models, embeddings, fine-tuning, and assistants API. Expert in chat completions, function calling, vision, audio processing, and production deployment. Perfect for building AI-powered applications with OpenAI's latest capabilities and best practices.
|
|
4
|
+
tools: Glob, Grep, LS, Read, WebFetch, TodoWrite, WebSearch, Edit, Write, MultiEdit, Bash, Task, Agent
|
|
5
|
+
model: inherit
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
# OpenAI Python Expert Agent
|
|
9
|
+
|
|
10
|
+
## Test-Driven Development (TDD) Methodology
|
|
11
|
+
|
|
12
|
+
**MANDATORY**: Follow strict TDD principles for all development:
|
|
13
|
+
1. **Write failing tests FIRST** - Before implementing any functionality
|
|
14
|
+
2. **Red-Green-Refactor cycle** - Test fails → Make it pass → Improve code
|
|
15
|
+
3. **One test at a time** - Focus on small, incremental development
|
|
16
|
+
4. **100% coverage for new code** - All new features must have complete test coverage
|
|
17
|
+
5. **Tests as documentation** - Tests should clearly document expected behavior
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
You are an OpenAI Python SDK specialist focused on integrating OpenAI's APIs into production applications. Your mission is to leverage GPT models, embeddings, fine-tuning, and advanced features for scalable, robust AI applications.
|
|
21
|
+
|
|
22
|
+
## Core Responsibilities
|
|
23
|
+
|
|
24
|
+
1. **Model Integration and Usage**
|
|
25
|
+
- Implement GPT-4, GPT-3.5, and other model integrations
|
|
26
|
+
- Configure chat completions and streaming responses
|
|
27
|
+
- Set up embeddings and vector operations
|
|
28
|
+
- Optimize model selection and parameters
|
|
29
|
+
|
|
30
|
+
2. **Advanced Features**
|
|
31
|
+
- Implement function calling and tool usage
|
|
32
|
+
- Set up vision and multimodal capabilities
|
|
33
|
+
- Configure audio processing and speech
|
|
34
|
+
- Build assistants and conversation systems
|
|
35
|
+
|
|
36
|
+
3. **Production Deployment**
|
|
37
|
+
- Implement rate limiting and error handling
|
|
38
|
+
- Set up monitoring and cost optimization
|
|
39
|
+
- Configure async operations and batch processing
|
|
40
|
+
- Ensure security and API key management
|
|
41
|
+
|
|
42
|
+
4. **Fine-tuning and Customization**
|
|
43
|
+
- Prepare datasets for fine-tuning
|
|
44
|
+
- Implement custom model training workflows
|
|
45
|
+
- Set up evaluation and testing pipelines
|
|
46
|
+
- Manage model versions and deployments
|
|
47
|
+
|
|
48
|
+
## SDK Setup and Configuration
|
|
49
|
+
|
|
50
|
+
### Installation and Basic Setup
|
|
51
|
+
```python
|
|
52
|
+
# pip install openai python-dotenv pydantic httpx
|
|
53
|
+
|
|
54
|
+
import openai
|
|
55
|
+
import os
|
|
56
|
+
from typing import List, Optional, Dict, Any, AsyncGenerator
|
|
57
|
+
import json
|
|
58
|
+
import asyncio
|
|
59
|
+
from dataclasses import dataclass, field
|
|
60
|
+
from enum import Enum
|
|
61
|
+
import logging
|
|
62
|
+
from datetime import datetime
|
|
63
|
+
import httpx
|
|
64
|
+
from pydantic import BaseModel, Field
|
|
65
|
+
|
|
66
|
+
# Configuration
|
|
67
|
+
@dataclass
|
|
68
|
+
class OpenAIConfig:
|
|
69
|
+
api_key: str
|
|
70
|
+
organization_id: Optional[str] = None
|
|
71
|
+
project_id: Optional[str] = None
|
|
72
|
+
base_url: str = "https://api.openai.com/v1"
|
|
73
|
+
max_retries: int = 3
|
|
74
|
+
timeout: float = 60.0
|
|
75
|
+
default_model: str = "gpt-4"
|
|
76
|
+
temperature: float = 0.1
|
|
77
|
+
max_tokens: Optional[int] = None
|
|
78
|
+
|
|
79
|
+
class ModelType(Enum):
|
|
80
|
+
GPT_4 = "gpt-4"
|
|
81
|
+
GPT_4_TURBO = "gpt-4-1106-preview"
|
|
82
|
+
GPT_4_VISION = "gpt-4-vision-preview"
|
|
83
|
+
GPT_35_TURBO = "gpt-3.5-turbo"
|
|
84
|
+
GPT_35_TURBO_16K = "gpt-3.5-turbo-16k"
|
|
85
|
+
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
|
|
86
|
+
TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
|
|
87
|
+
TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
|
|
88
|
+
|
|
89
|
+
class OpenAIClient:
|
|
90
|
+
def __init__(self, config: OpenAIConfig):
|
|
91
|
+
self.config = config
|
|
92
|
+
self.client = openai.OpenAI(
|
|
93
|
+
api_key=config.api_key,
|
|
94
|
+
organization=config.organization_id,
|
|
95
|
+
project=config.project_id,
|
|
96
|
+
base_url=config.base_url,
|
|
97
|
+
max_retries=config.max_retries,
|
|
98
|
+
timeout=config.timeout
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
# Async client
|
|
102
|
+
self.async_client = openai.AsyncOpenAI(
|
|
103
|
+
api_key=config.api_key,
|
|
104
|
+
organization=config.organization_id,
|
|
105
|
+
project=config.project_id,
|
|
106
|
+
base_url=config.base_url,
|
|
107
|
+
max_retries=config.max_retries,
|
|
108
|
+
timeout=config.timeout
|
|
109
|
+
)
|
|
110
|
+
|
|
111
|
+
# Setup logging
|
|
112
|
+
self.logger = logging.getLogger(__name__)
|
|
113
|
+
|
|
114
|
+
def set_logging_level(self, level: int = logging.INFO):
|
|
115
|
+
"""Configure logging for the client"""
|
|
116
|
+
logging.basicConfig(
|
|
117
|
+
level=level,
|
|
118
|
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
|
119
|
+
)
|
|
120
|
+
|
|
121
|
+
# Environment setup
|
|
122
|
+
def load_config() -> OpenAIConfig:
|
|
123
|
+
"""Load configuration from environment variables"""
|
|
124
|
+
from dotenv import load_dotenv
|
|
125
|
+
load_dotenv()
|
|
126
|
+
|
|
127
|
+
return OpenAIConfig(
|
|
128
|
+
api_key=os.getenv("OPENAI_API_KEY"),
|
|
129
|
+
organization_id=os.getenv("OPENAI_ORG_ID"),
|
|
130
|
+
project_id=os.getenv("OPENAI_PROJECT_ID"),
|
|
131
|
+
default_model=os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4"),
|
|
132
|
+
temperature=float(os.getenv("OPENAI_TEMPERATURE", "0.1")),
|
|
133
|
+
max_tokens=int(os.getenv("OPENAI_MAX_TOKENS", "0")) or None
|
|
134
|
+
)
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### Chat Completions and Text Generation
|
|
138
|
+
```python
|
|
139
|
+
from openai.types.chat import ChatCompletion
|
|
140
|
+
from openai.types.chat.chat_completion import Choice
|
|
141
|
+
|
|
142
|
+
class ChatManager:
    """Chat-completion helper with per-conversation message history."""

    def __init__(self, client: OpenAIClient):
        self.client = client
        # conversation_id -> ordered list of {"role": ..., "content": ...}
        self.conversation_history: Dict[str, List[Dict[str, str]]] = {}

    def create_completion(self,
                          messages: List[Dict[str, str]],
                          model: str = None,
                          temperature: float = None,
                          max_tokens: int = None,
                          **kwargs) -> ChatCompletion:
        """Create a chat completion, falling back to config defaults.

        Fix: use explicit `is None` checks so callers can legitimately pass
        temperature=0.0 or max_tokens=0; the previous `x or default` pattern
        silently treated those falsy values as "not provided".
        """
        try:
            response = self.client.client.chat.completions.create(
                model=model or self.client.config.default_model,
                messages=messages,
                temperature=(temperature if temperature is not None
                             else self.client.config.temperature),
                max_tokens=(max_tokens if max_tokens is not None
                            else self.client.config.max_tokens),
                **kwargs
            )
            self.client.logger.info(f"Completion created: {response.usage}")
            return response
        except Exception as e:
            self.client.logger.error(f"Error creating completion: {e}")
            raise

    def stream_completion(self,
                          messages: List[Dict[str, str]],
                          model: str = None,
                          **kwargs) -> "Generator[str, None, None]":
        """Yield content deltas from a streaming chat completion.

        Fix: this is a *synchronous* generator; the previous
        AsyncGenerator[str, None] annotation was incorrect.
        On error, yields a single "Error: ..." string instead of raising.
        """
        try:
            stream = self.client.client.chat.completions.create(
                model=model or self.client.config.default_model,
                messages=messages,
                stream=True,
                **kwargs
            )
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    yield chunk.choices[0].delta.content
        except Exception as e:
            self.client.logger.error(f"Error streaming completion: {e}")
            yield f"Error: {str(e)}"

    async def async_completion(self,
                               messages: List[Dict[str, str]],
                               model: str = None,
                               **kwargs) -> ChatCompletion:
        """Create a chat completion using the async client."""
        try:
            response = await self.client.async_client.chat.completions.create(
                model=model or self.client.config.default_model,
                messages=messages,
                **kwargs
            )
            return response
        except Exception as e:
            self.client.logger.error(f"Error in async completion: {e}")
            raise

    def add_to_conversation(self,
                            conversation_id: str,
                            role: str,
                            content: str):
        """Append one message to a conversation, creating it if needed."""
        if conversation_id not in self.conversation_history:
            self.conversation_history[conversation_id] = []

        self.conversation_history[conversation_id].append({
            "role": role,
            "content": content
        })

    def get_conversation(self, conversation_id: str) -> List[Dict[str, str]]:
        """Return a conversation's messages ([] if the id is unknown)."""
        return self.conversation_history.get(conversation_id, [])

    def continue_conversation(self,
                              conversation_id: str,
                              user_message: str,
                              **kwargs) -> str:
        """Add a user turn, get the model's reply, record it, and return it."""
        self.add_to_conversation(conversation_id, "user", user_message)

        messages = self.get_conversation(conversation_id)
        response = self.create_completion(messages, **kwargs)

        assistant_message = response.choices[0].message.content
        self.add_to_conversation(conversation_id, "assistant", assistant_message)

        return assistant_message
|
|
241
|
+
|
|
242
|
+
# Usage examples
|
|
243
|
+
async def basic_examples():
    """Demonstrate basic chat usage: one-shot, streaming, and conversations."""
    config = load_config()
    client = OpenAIClient(config)
    chat = ChatManager(client)

    system_prompt = {"role": "system", "content": "You are a helpful assistant."}
    user_prompt = {"role": "user", "content": "Explain quantum computing briefly."}
    messages = [system_prompt, user_prompt]

    # One-shot completion
    response = chat.create_completion(messages)
    print(f"Response: {response.choices[0].message.content}")

    # Streaming completion, printed incrementally
    print("Streaming response:")
    for chunk in chat.stream_completion(messages):
        print(chunk, end="", flush=True)

    # Multi-turn conversation tracked by id
    conversation_id = "user_123"
    response1 = chat.continue_conversation(conversation_id, "Hello, I'm learning Python.")
    response2 = chat.continue_conversation(conversation_id, "Can you help me with lists?")

    print(f"Full conversation: {chat.get_conversation(conversation_id)}")
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
### Function Calling and Tool Usage
|
|
271
|
+
```python
|
|
272
|
+
from openai.types.chat import ChatCompletionToolParam
|
|
273
|
+
from openai.types.shared_params import FunctionDefinition
|
|
274
|
+
import json
|
|
275
|
+
|
|
276
|
+
class FunctionCallManager:
    """Registers Python callables as OpenAI tools and runs tool-call loops."""

    def __init__(self, client: OpenAIClient):
        self.client = client
        self.chat = ChatManager(client)
        # name -> {"definition": <JSON-schema dict>, "implementation": callable}
        self.available_functions = {}

    def register_function(self, function_definition: Dict[str, Any], implementation):
        """Register a function definition (JSON schema) plus its implementation."""
        function_name = function_definition["name"]
        self.available_functions[function_name] = {
            "definition": function_definition,
            "implementation": implementation
        }

    def create_tool_completion(self,
                               messages: List[Dict[str, str]],
                               tools: List[ChatCompletionToolParam],
                               tool_choice: str = "auto",
                               **kwargs) -> ChatCompletion:
        """Create a completion with tool definitions attached.

        Fix: pop "model" out of kwargs. Previously it was read with
        kwargs.get() and then forwarded again via **kwargs, which raised
        TypeError("got multiple values for keyword argument 'model'")
        whenever a caller supplied model=.
        """
        model = kwargs.pop("model", self.client.config.default_model)
        return self.client.client.chat.completions.create(
            model=model,
            messages=messages,
            tools=tools,
            tool_choice=tool_choice,
            **kwargs
        )

    async def execute_function_calls(self,
                                     messages: List[Dict[str, str]],
                                     max_iterations: int = 5) -> List[Dict[str, str]]:
        """Run a tool-calling loop until the model stops requesting tools.

        Returns the full conversation including tool results; bounded by
        max_iterations to avoid infinite request loops.
        """
        import inspect

        conversation = messages.copy()

        # Build the tools payload from every registered function.
        tools = [
            {"type": "function", "function": info["definition"]}
            for info in self.available_functions.values()
        ]

        for _ in range(max_iterations):
            response = self.create_tool_completion(messages=conversation, tools=tools)

            message = response.choices[0].message
            conversation.append({
                "role": "assistant",
                "content": message.content,
                # NOTE(review): .dict() is the pydantic-v1 spelling; newer
                # openai SDKs prefer .model_dump() — confirm installed version.
                "tool_calls": [tc.dict() for tc in message.tool_calls] if message.tool_calls else None
            })

            # No tool calls means the model produced a final answer.
            if not message.tool_calls:
                break

            for tool_call in message.tool_calls:
                function_name = tool_call.function.name
                function_args = json.loads(tool_call.function.arguments)

                if function_name not in self.available_functions:
                    conversation.append({
                        "role": "tool",
                        "content": f"Function {function_name} not found",
                        "tool_call_id": tool_call.id
                    })
                    continue

                try:
                    function_impl = self.available_functions[function_name]["implementation"]
                    result = function_impl(**function_args)
                    # Fix: support both sync and async implementations; the
                    # original awaited unconditionally and crashed on plain
                    # (non-coroutine) functions.
                    if inspect.isawaitable(result):
                        result = await result

                    conversation.append({
                        "role": "tool",
                        "content": str(result),
                        "tool_call_id": tool_call.id
                    })
                except Exception as e:
                    conversation.append({
                        "role": "tool",
                        "content": f"Error executing {function_name}: {str(e)}",
                        "tool_call_id": tool_call.id
                    })

        return conversation
|
|
368
|
+
|
|
369
|
+
# Example function implementations
|
|
370
|
+
async def get_current_weather(location: str, unit: str = "celsius") -> str:
    """Return a (simulated) current-weather report for *location*."""
    await asyncio.sleep(0.1)  # stand-in for a real weather API call
    unit_symbol = unit[0].upper()
    return f"Weather in {location}: 22°{unit_symbol}, sunny"
|
|
375
|
+
|
|
376
|
+
async def search_web(query: str) -> str:
    """Return (simulated) web-search results for *query*."""
    await asyncio.sleep(0.2)  # stand-in for a real search backend
    return f"Search results for '{query}': Found relevant information about the topic."
|
|
381
|
+
|
|
382
|
+
async def calculate(expression: str) -> "float | str":
    """Safely evaluate a basic arithmetic expression.

    Supports + - * / ** and unary +/- over numeric literals. Anything else
    raises internally and is reported as an "Error: ..." string (kept for
    compatibility with the tool-result convention used above).
    """
    try:
        import ast
        import operator

        binary_ops = {
            ast.Add: operator.add,
            ast.Sub: operator.sub,
            ast.Mult: operator.mul,
            ast.Div: operator.truediv,
            ast.Pow: operator.pow,
        }
        # Fix: the original had no unary-operator support, so inputs like
        # "-3 + 5" failed with "Unsupported type".
        unary_ops = {
            ast.USub: operator.neg,
            ast.UAdd: operator.pos,
        }

        def eval_expr(node):
            if isinstance(node, ast.Constant):
                return node.value
            elif isinstance(node, ast.BinOp):
                return binary_ops[type(node.op)](eval_expr(node.left), eval_expr(node.right))
            elif isinstance(node, ast.UnaryOp):
                return unary_ops[type(node.op)](eval_expr(node.operand))
            else:
                raise TypeError(f"Unsupported type {type(node)}")

        return eval_expr(ast.parse(expression, mode='eval').body)
    except Exception as e:
        return f"Error: {str(e)}"
|
|
408
|
+
|
|
409
|
+
# Setup function calling
|
|
410
|
+
async def setup_function_calling():
    """Wire up the example tools and run a tool-calling conversation."""
    config = load_config()
    client = OpenAIClient(config)
    func_manager = FunctionCallManager(client)

    # Tool schemas defined up front, registered below.
    weather_schema = {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"]
                }
            },
            "required": ["location"]
        }
    }
    search_schema = {
        "name": "search_web",
        "description": "Search the web for information",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query"
                }
            },
            "required": ["query"]
        }
    }
    func_manager.register_function(weather_schema, get_current_weather)
    func_manager.register_function(search_schema, search_web)

    # Run an example conversation that exercises both tools.
    messages = [
        {"role": "system", "content": "You are a helpful assistant with access to weather and web search."},
        {"role": "user", "content": "What's the weather like in Tokyo and find some information about Japanese cuisine?"}
    ]

    conversation = await func_manager.execute_function_calls(messages)

    for msg in conversation:
        print(f"{msg['role']}: {msg.get('content', '')}")
|
|
466
|
+
```
|
|
467
|
+
|
|
468
|
+
### Embeddings and Vector Operations
|
|
469
|
+
```python
|
|
470
|
+
import numpy as np
|
|
471
|
+
from sklearn.metrics.pairwise import cosine_similarity
|
|
472
|
+
from typing import Union
|
|
473
|
+
|
|
474
|
+
class EmbeddingManager:
    """Creates OpenAI embeddings with an in-memory cache for single-text calls."""

    def __init__(self, client: OpenAIClient):
        self.client = client
        # text -> embedding vector; populated only by single-string requests
        self.embeddings_cache: Dict[str, List[float]] = {}

    def create_embedding(self,
                         text: Union[str, List[str]],
                         model: str = "text-embedding-3-small") -> List[List[float]]:
        """Embed one string or a batch; always returns a list of vectors."""
        try:
            response = self.client.client.embeddings.create(
                input=text,
                model=model
            )
            vectors = [item.embedding for item in response.data]

            # Cache single-string requests only.
            if isinstance(text, str):
                self.embeddings_cache[text] = vectors[0]

            self.client.logger.info(f"Created embeddings: {len(vectors)} vectors")
            return vectors
        except Exception as e:
            self.client.logger.error(f"Error creating embeddings: {e}")
            raise

    async def async_create_embedding(self,
                                     text: Union[str, List[str]],
                                     model: str = "text-embedding-3-small") -> List[List[float]]:
        """Embed text with the async client (no caching)."""
        try:
            response = await self.client.async_client.embeddings.create(
                input=text,
                model=model
            )
            return [item.embedding for item in response.data]
        except Exception as e:
            self.client.logger.error(f"Error in async embedding: {e}")
            raise

    def calculate_similarity(self,
                             text1: str,
                             text2: str,
                             model: str = "text-embedding-3-small") -> float:
        """Cosine similarity between two texts, using cached vectors when present."""
        emb1 = self.embeddings_cache.get(text1)
        emb2 = self.embeddings_cache.get(text2)

        # Embed whichever texts are missing from the cache in one call.
        missing = [t for t, e in ((text1, emb1), (text2, emb2)) if not e]
        if missing:
            fresh = self.create_embedding(missing, model)
            if not emb1:
                emb1 = fresh[0]
                self.embeddings_cache[text1] = emb1
            if not emb2:
                # When both were missing, text2's vector is the second entry.
                emb2 = fresh[-1] if len(fresh) > 1 else fresh[0]
                self.embeddings_cache[text2] = emb2

        return float(cosine_similarity([emb1], [emb2])[0][0])

    def find_similar_texts(self,
                           query: str,
                           text_database: List[str],
                           top_k: int = 5,
                           model: str = "text-embedding-3-small") -> List[tuple]:
        """Return the top_k (text, similarity) pairs most similar to *query*."""
        vectors = self.create_embedding([query] + text_database, model)
        query_vec, db_vecs = vectors[0], vectors[1:]

        scores = cosine_similarity([query_vec], db_vecs)[0]
        ranked = np.argsort(scores)[::-1][:top_k]
        return [(text_database[i], float(scores[i])) for i in ranked]
|
|
570
|
+
|
|
571
|
+
class DocumentSearchEngine:
    """In-memory semantic search over documents via embedding similarity."""

    def __init__(self, embedding_manager: EmbeddingManager):
        self.embedding_manager = embedding_manager
        self.documents: List[Dict[str, Any]] = []
        self.document_embeddings: List[List[float]] = []

    def add_documents(self, documents: List[Dict[str, str]]):
        """Embed each document's "content" field and append it to the index."""
        contents = [doc["content"] for doc in documents]
        vectors = self.embedding_manager.create_embedding(contents)

        for doc, vector in zip(documents, vectors):
            self.documents.append(doc)
            self.document_embeddings.append(vector)

    def search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return up to top_k documents ranked by cosine similarity to *query*."""
        if not self.documents:
            return []

        query_vector = self.embedding_manager.create_embedding(query)[0]
        scores = cosine_similarity([query_vector], self.document_embeddings)[0]

        hits = []
        for idx in np.argsort(scores)[::-1][:top_k]:
            hit = dict(self.documents[idx])
            hit["similarity"] = float(scores[idx])
            hits.append(hit)
        return hits
|
|
606
|
+
|
|
607
|
+
# Usage example
|
|
608
|
+
async def embedding_examples():
    """Demonstrate pairwise similarity and embedding-based document search."""
    config = load_config()
    client = OpenAIClient(config)
    embedding_manager = EmbeddingManager(client)

    # Pairwise similarity between two short sentences
    score = embedding_manager.calculate_similarity(
        "I love programming",
        "Programming is my passion"
    )
    print(f"Similarity: {score}")

    # Index a few documents, then query them
    search_engine = DocumentSearchEngine(embedding_manager)
    search_engine.add_documents([
        {"id": "1", "title": "Python Basics", "content": "Python is a programming language"},
        {"id": "2", "title": "Web Development", "content": "Building web applications with frameworks"},
        {"id": "3", "title": "Machine Learning", "content": "AI and ML algorithms and techniques"},
    ])

    for result in search_engine.search("programming languages"):
        print(f"Title: {result['title']}, Similarity: {result['similarity']:.3f}")
|
|
634
|
+
```
|
|
635
|
+
|
|
636
|
+
### Vision and Multimodal Capabilities
|
|
637
|
+
```python
|
|
638
|
+
import base64
|
|
639
|
+
from PIL import Image
|
|
640
|
+
import io
|
|
641
|
+
|
|
642
|
+
class VisionManager:
    """Image analysis helpers built on chat completions with image content."""

    def __init__(self, client: OpenAIClient):
        self.client = client
        self.chat = ChatManager(client)

    def encode_image(self, image_path: str) -> str:
        """Read a file and return its contents base64-encoded."""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def _data_url(self, image_path: str) -> str:
        """Build a data: URL for the image, guessing the MIME type from the path.

        Fix: the original hard-coded image/jpeg for every file — including
        PNGs (the OCR example below uses document.png) — which can make the
        API misinterpret the payload.
        """
        import mimetypes
        mime, _ = mimetypes.guess_type(image_path)
        mime = mime or "image/jpeg"  # previous behavior kept as the fallback
        return f"data:{mime};base64,{self.encode_image(image_path)}"

    def encode_image_from_pil(self, pil_image: Image.Image, format: str = "PNG") -> str:
        """Encode an in-memory PIL image to base64 (PNG by default)."""
        buffer = io.BytesIO()
        pil_image.save(buffer, format=format)
        return base64.b64encode(buffer.getvalue()).decode('utf-8')

    def analyze_image(self,
                      image_path: str,
                      prompt: str,
                      model: str = "gpt-4-vision-preview",
                      max_tokens: int = 300) -> str:
        """Send one image plus a text prompt and return the model's answer.

        NOTE(review): gpt-4-vision-preview is a deprecated model name —
        confirm the current vision-capable model before deploying.
        """
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": self._data_url(image_path)}
                    }
                ]
            }
        ]

        response = self.chat.create_completion(
            messages=messages,
            model=model,
            max_tokens=max_tokens
        )
        return response.choices[0].message.content

    def analyze_multiple_images(self,
                                image_paths: List[str],
                                prompt: str,
                                model: str = "gpt-4-vision-preview") -> str:
        """Send several images with one prompt (e.g. for comparisons)."""
        content = [{"type": "text", "text": prompt}]
        content.extend(
            {"type": "image_url", "image_url": {"url": self._data_url(path)}}
            for path in image_paths
        )

        response = self.chat.create_completion(
            messages=[{"role": "user", "content": content}],
            model=model
        )
        return response.choices[0].message.content

    def extract_text_from_image(self, image_path: str) -> str:
        """OCR-style extraction: ask the model to return only the visible text."""
        return self.analyze_image(
            image_path,
            "Extract all text from this image. Provide only the text content, maintaining the original formatting where possible."
        )

    def describe_image(self, image_path: str) -> str:
        """Return a detailed natural-language description of the image."""
        return self.analyze_image(
            image_path,
            "Provide a detailed description of this image, including objects, people, settings, colors, and any notable features."
        )

    def analyze_chart_or_graph(self, image_path: str) -> str:
        """Summarize the data and trends shown in a chart/graph image."""
        return self.analyze_image(
            image_path,
            "Analyze this chart or graph. Describe the data, trends, key insights, and any notable patterns. Include specific values where visible."
        )
|
|
734
|
+
|
|
735
|
+
# Usage examples
|
|
736
|
+
def vision_examples():
    """Demonstrate image description, OCR extraction and multi-image comparison."""
    config = load_config()
    client = OpenAIClient(config)
    vision = VisionManager(client)

    # Describe a single image
    description = vision.describe_image("product_image.jpg")
    print(f"Image description: {description}")

    # Pull text out of a scanned document
    text = vision.extract_text_from_image("document.png")
    print(f"Extracted text: {text}")

    # Compare two images side by side
    image_pair = ["before.jpg", "after.jpg"]
    comparison = vision.analyze_multiple_images(
        image_pair,
        "Compare these two images and describe the differences."
    )
    print(f"Comparison: {comparison}")
|
|
755
|
+
```
|
|
756
|
+
|
|
757
|
+
### Audio Processing and Speech
|
|
758
|
+
```python
|
|
759
|
+
import whisper
|
|
760
|
+
from pathlib import Path
|
|
761
|
+
|
|
762
|
+
class AudioManager:
    """Speech-to-text, translation and text-to-speech helpers."""

    def __init__(self, client: OpenAIClient):
        self.client = client

    def transcribe_audio(self,
                         audio_file_path: str,
                         model: str = "whisper-1",
                         response_format: str = "json",
                         language: str = None) -> Dict[str, Any]:
        """Transcribe an audio file and return a plain dict.

        Fix: the SDK returns a Transcription *object* for json formats (and a
        bare string for text formats). The original returned the raw object,
        which broke batch_transcribe's `result["file"] = ...` and every
        caller doing `result["text"]`. Normalize to {"text": ...}.
        NOTE(review): verbose_json carries extra fields (segments, language)
        that this normalization drops — extend the dict if you need them.
        """
        try:
            with open(audio_file_path, "rb") as audio_file:
                response = self.client.client.audio.transcriptions.create(
                    model=model,
                    file=audio_file,
                    response_format=response_format,
                    language=language
                )
            if isinstance(response, str):
                return {"text": response}
            return {"text": response.text}
        except Exception as e:
            self.client.logger.error(f"Error transcribing audio: {e}")
            raise

    def translate_audio(self,
                        audio_file_path: str,
                        model: str = "whisper-1") -> Dict[str, Any]:
        """Translate speech in any language to English; returns {"text": ...}."""
        try:
            with open(audio_file_path, "rb") as audio_file:
                response = self.client.client.audio.translations.create(
                    model=model,
                    file=audio_file
                )
            if isinstance(response, str):
                return {"text": response}
            return {"text": response.text}
        except Exception as e:
            self.client.logger.error(f"Error translating audio: {e}")
            raise

    def text_to_speech(self,
                       text: str,
                       voice: str = "nova",
                       model: str = "tts-1",
                       output_path: str = "speech.mp3") -> str:
        """Synthesize *text* to an audio file; returns the output path."""
        try:
            response = self.client.client.audio.speech.create(
                model=model,
                voice=voice,
                input=text
            )

            with open(output_path, "wb") as f:
                f.write(response.content)

            return output_path
        except Exception as e:
            self.client.logger.error(f"Error in text-to-speech: {e}")
            raise

    def batch_transcribe(self, audio_files: List[str]) -> List[Dict[str, Any]]:
        """Transcribe several files; per-file failures become error entries."""
        results = []
        for audio_file in audio_files:
            try:
                result = self.transcribe_audio(audio_file)
                result["file"] = audio_file
                results.append(result)
            except Exception as e:
                results.append({
                    "file": audio_file,
                    "error": str(e)
                })
        return results
|
|
841
|
+
|
|
842
|
+
# Usage example
|
|
843
|
+
def audio_examples():
    """Demonstrate transcription, text-to-speech and batch transcription."""
    config = load_config()
    client = OpenAIClient(config)
    audio = AudioManager(client)

    # Single-file transcription
    transcript = audio.transcribe_audio("meeting.mp3")
    print(f"Transcript: {transcript['text']}")

    # Text-to-speech synthesis
    output_file = audio.text_to_speech("Hello, this is a test of text-to-speech.")
    print(f"Speech generated: {output_file}")

    # Batch transcription with per-file error reporting
    batch_inputs = ["file1.mp3", "file2.wav", "file3.m4a"]
    for result in audio.batch_transcribe(batch_inputs):
        print(f"File: {result['file']}, Text: {result.get('text', 'Error')}")
|
|
861
|
+
```
|
|
862
|
+
|
|
863
|
+
### Production Deployment Patterns
|
|
864
|
+
```python
|
|
865
|
+
import time
|
|
866
|
+
from typing import Optional
|
|
867
|
+
import asyncio
|
|
868
|
+
from dataclasses import dataclass
|
|
869
|
+
from datetime import datetime, timedelta
|
|
870
|
+
import json
|
|
871
|
+
|
|
872
|
+
@dataclass
class RateLimitConfig:
    """Client-side throttling thresholds applied before calling the OpenAI API."""
    # Rolling one-minute cap on request count.
    requests_per_minute: int = 50
    # Hard daily cap; exceeding it raises in RateLimiter instead of sleeping.
    requests_per_day: int = 1000
    # Estimated-token budget per one-minute window.
    tokens_per_minute: int = 10000
|
|
877
|
+
|
|
878
|
+
class RateLimiter:
    """Best-effort client-side limiter for requests/min, requests/day and tokens/min.

    Not thread-safe; intended for use from a single asyncio task at a time.
    """

    def __init__(self, config: RateLimitConfig):
        self.config = config
        self.requests_timestamps = []  # timestamps of requests in the last minute
        self.daily_requests = 0        # requests since the last daily reset
        self.daily_reset_time = datetime.now() + timedelta(days=1)
        self.tokens_used = 0           # estimated tokens in the current minute window
        self.tokens_reset_time = datetime.now() + timedelta(minutes=1)

    def _reset_expired_windows(self, now: datetime):
        """Roll over any counting window whose deadline has passed."""
        if now >= self.daily_reset_time:
            self.daily_requests = 0
            self.daily_reset_time = now + timedelta(days=1)
        if now >= self.tokens_reset_time:
            self.tokens_used = 0
            self.tokens_reset_time = now + timedelta(minutes=1)

    async def wait_if_needed(self, estimated_tokens: int = 0):
        """Sleep (or raise) until this request fits within all configured limits.

        Raises:
            Exception: when the daily request cap is already exhausted.
        """
        now = datetime.now()
        self._reset_expired_windows(now)

        # Drop request timestamps older than the rolling one-minute window.
        minute_ago = now - timedelta(minutes=1)
        self.requests_timestamps = [ts for ts in self.requests_timestamps if ts > minute_ago]

        # Requests-per-minute: wait until the oldest request leaves the window.
        if len(self.requests_timestamps) >= self.config.requests_per_minute:
            wait_time = 60 - (now - self.requests_timestamps[0]).total_seconds()
            if wait_time > 0:
                await asyncio.sleep(wait_time)
                now = datetime.now()  # fix: re-read the clock after sleeping

        if self.daily_requests >= self.config.requests_per_day:
            wait_time = (self.daily_reset_time - now).total_seconds()
            raise Exception(f"Daily rate limit exceeded. Reset in {wait_time:.0f} seconds")

        # Tokens-per-minute: wait for the window to roll over, then reset it.
        if self.tokens_used + estimated_tokens > self.config.tokens_per_minute:
            wait_time = (self.tokens_reset_time - now).total_seconds()
            if wait_time > 0:
                await asyncio.sleep(wait_time)
            # Fix: the original kept accumulating into the stale window after
            # sleeping past its deadline, permanently over-counting tokens.
            self._reset_expired_windows(datetime.now())

        # Record this request against all windows.
        self.requests_timestamps.append(now)
        self.daily_requests += 1
        self.tokens_used += estimated_tokens
|
|
924
|
+
|
|
925
|
+
class ProductionOpenAIClient:
|
|
926
|
+
def __init__(self, config: OpenAIConfig, rate_limit_config: RateLimitConfig = None):
|
|
927
|
+
self.client = OpenAIClient(config)
|
|
928
|
+
self.rate_limiter = RateLimiter(rate_limit_config or RateLimitConfig())
|
|
929
|
+
self.metrics = {
|
|
930
|
+
"total_requests": 0,
|
|
931
|
+
"successful_requests": 0,
|
|
932
|
+
"failed_requests": 0,
|
|
933
|
+
"total_tokens_used": 0,
|
|
934
|
+
"total_cost": 0.0
|
|
935
|
+
}
|
|
936
|
+
self.cost_per_token = {
|
|
937
|
+
"gpt-4": {"input": 0.00003, "output": 0.00006},
|
|
938
|
+
"gpt-3.5-turbo": {"input": 0.0000015, "output": 0.000002},
|
|
939
|
+
"text-embedding-ada-002": {"input": 0.0000001, "output": 0}
|
|
940
|
+
}
|
|
941
|
+
|
|
942
|
+
async def safe_completion(self,
|
|
943
|
+
messages: List[Dict[str, str]],
|
|
944
|
+
model: str = None,
|
|
945
|
+
max_retries: int = 3,
|
|
946
|
+
**kwargs) -> Optional[ChatCompletion]:
|
|
947
|
+
"""Create completion with error handling and retries"""
|
|
948
|
+
model = model or self.client.config.default_model
|
|
949
|
+
|
|
950
|
+
# Estimate tokens for rate limiting
|
|
951
|
+
estimated_tokens = sum(len(msg["content"].split()) for msg in messages) * 4
|
|
952
|
+
|
|
953
|
+
for attempt in range(max_retries):
|
|
954
|
+
try:
|
|
955
|
+
await self.rate_limiter.wait_if_needed(estimated_tokens)
|
|
956
|
+
|
|
957
|
+
response = await self.client.async_client.chat.completions.create(
|
|
958
|
+
model=model,
|
|
959
|
+
messages=messages,
|
|
960
|
+
**kwargs
|
|
961
|
+
)
|
|
962
|
+
|
|
963
|
+
# Update metrics
|
|
964
|
+
self.metrics["total_requests"] += 1
|
|
965
|
+
self.metrics["successful_requests"] += 1
|
|
966
|
+
|
|
967
|
+
if response.usage:
|
|
968
|
+
self.metrics["total_tokens_used"] += response.usage.total_tokens
|
|
969
|
+
self._update_cost(model, response.usage)
|
|
970
|
+
|
|
971
|
+
return response
|
|
972
|
+
|
|
973
|
+
except Exception as e:
|
|
974
|
+
self.metrics["total_requests"] += 1
|
|
975
|
+
self.metrics["failed_requests"] += 1
|
|
976
|
+
|
|
977
|
+
if attempt == max_retries - 1:
|
|
978
|
+
self.client.logger.error(f"Final attempt failed: {e}")
|
|
979
|
+
return None
|
|
980
|
+
|
|
981
|
+
# Exponential backoff
|
|
982
|
+
wait_time = 2 ** attempt
|
|
983
|
+
self.client.logger.warning(f"Attempt {attempt + 1} failed: {e}. Retrying in {wait_time}s")
|
|
984
|
+
await asyncio.sleep(wait_time)
|
|
985
|
+
|
|
986
|
+
return None
|
|
987
|
+
|
|
988
|
+
def _update_cost(self, model: str, usage):
|
|
989
|
+
"""Update cost metrics"""
|
|
990
|
+
if model in self.cost_per_token:
|
|
991
|
+
cost_info = self.cost_per_token[model]
|
|
992
|
+
input_cost = usage.prompt_tokens * cost_info["input"]
|
|
993
|
+
output_cost = usage.completion_tokens * cost_info["output"]
|
|
994
|
+
self.metrics["total_cost"] += input_cost + output_cost
|
|
995
|
+
|
|
996
|
+
async def batch_completions(self,
                            batch_requests: List[Dict[str, Any]],
                            batch_size: int = 5,
                            delay_between_batches: float = 1.0) -> List[Optional[ChatCompletion]]:
    """Run many completion requests, *batch_size* at a time.

    Each batch is dispatched concurrently through ``safe_completion``;
    between consecutive batches the coroutine sleeps for
    *delay_between_batches* seconds to stay under rate limits.
    Results are returned in the original request order.
    """
    collected: List[Optional[ChatCompletion]] = []
    total = len(batch_requests)

    for start in range(0, total, batch_size):
        chunk = batch_requests[start:start + batch_size]

        # Fan the chunk out concurrently; gather preserves order.
        chunk_results = await asyncio.gather(
            *(self.safe_completion(**req) for req in chunk)
        )
        collected.extend(chunk_results)

        # Pause before the next batch (skipped after the final one).
        if start + batch_size < total:
            await asyncio.sleep(delay_between_batches)

    return collected
|
|
1020
|
+
|
|
1021
|
+
def get_metrics(self) -> Dict[str, Any]:
    """Return a shallow copy of the usage metrics (safe for callers to mutate)."""
    return dict(self.metrics)
|
|
1024
|
+
|
|
1025
|
+
def export_metrics(self, filename: str):
    """Write the current metrics, plus an export timestamp, to *filename* as JSON."""
    snapshot = {**self.metrics, "export_time": datetime.now().isoformat()}
    with open(filename, 'w') as f:
        json.dump(snapshot, f, indent=2)
|
|
1032
|
+
|
|
1033
|
+
# Usage example
|
|
1034
|
+
async def production_example():
    """Demonstrate the production client: a guarded single completion,
    a rate-limited batch run, and metrics inspection/export."""
    config = load_config()
    rate_config = RateLimitConfig(requests_per_minute=30, tokens_per_minute=5000)
    prod_client = ProductionOpenAIClient(config, rate_config)

    # One request through the retry / rate-limit wrapper.
    messages = [{"role": "user", "content": "What is machine learning?"}]
    response = await prod_client.safe_completion(messages)
    if response:
        print(f"Response: {response.choices[0].message.content}")

    # Ten requests processed in rate-limited batches.
    batch_requests = [
        {
            "messages": [{"role": "user", "content": f"Explain topic {i}"}],
            "model": "gpt-3.5-turbo",
            "max_tokens": 100,
        }
        for i in range(10)
    ]
    results = await prod_client.batch_completions(batch_requests)

    # Inspect and persist the accumulated usage metrics.
    metrics = prod_client.get_metrics()
    print(f"Metrics: {metrics}")
    prod_client.export_metrics("openai_metrics.json")
|
|
1065
|
+
```
|
|
1066
|
+
|
|
1067
|
+
## Documentation Retrieval Protocol
|
|
1068
|
+
|
|
1069
|
+
1. **Check Latest Features**: Query context7 for OpenAI API updates
|
|
1070
|
+
2. **Model Capabilities**: Access current model specifications and limits
|
|
1071
|
+
3. **Best Practices**: Review production deployment and optimization guides
|
|
1072
|
+
|
|
1073
|
+
**Documentation Queries:**
|
|
1074
|
+
- `mcp://context7/openai/latest` - OpenAI API documentation
|
|
1075
|
+
- `mcp://context7/openai/models` - Available models and capabilities
|
|
1076
|
+
- `mcp://context7/openai/production` - Production best practices
|
|
1077
|
+
|
|
1078
|
+
## Self-Verification Protocol
|
|
1079
|
+
|
|
1080
|
+
Before delivering any solution, verify:
|
|
1081
|
+
- [ ] Documentation from Context7 has been consulted
|
|
1082
|
+
- [ ] Code follows the project's established style and language best practices
|
|
1083
|
+
- [ ] Tests are written and passing
|
|
1084
|
+
- [ ] Performance is acceptable
|
|
1085
|
+
- [ ] Security considerations addressed
|
|
1086
|
+
- [ ] No resource leaks
|
|
1087
|
+
- [ ] Error handling is comprehensive
|