empathy-framework 5.0.1__py3-none-any.whl → 5.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/METADATA +53 -9
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/RECORD +28 -31
- empathy_llm_toolkit/providers.py +175 -35
- empathy_llm_toolkit/utils/tokens.py +150 -30
- empathy_os/__init__.py +1 -1
- empathy_os/cli/commands/batch.py +256 -0
- empathy_os/cli/commands/cache.py +248 -0
- empathy_os/cli/commands/inspect.py +1 -2
- empathy_os/cli/commands/metrics.py +1 -1
- empathy_os/cli/commands/routing.py +285 -0
- empathy_os/cli/commands/workflow.py +2 -2
- empathy_os/cli/parsers/__init__.py +6 -0
- empathy_os/cli/parsers/batch.py +118 -0
- empathy_os/cli/parsers/cache.py +65 -0
- empathy_os/cli/parsers/routing.py +110 -0
- empathy_os/dashboard/standalone_server.py +22 -11
- empathy_os/metrics/collector.py +31 -0
- empathy_os/models/token_estimator.py +21 -13
- empathy_os/telemetry/agent_coordination.py +12 -14
- empathy_os/telemetry/agent_tracking.py +18 -19
- empathy_os/telemetry/approval_gates.py +27 -39
- empathy_os/telemetry/event_streaming.py +19 -19
- empathy_os/telemetry/feedback_loop.py +13 -16
- empathy_os/workflows/batch_processing.py +56 -10
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/README 2.md +0 -454
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/WHEEL +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/top_level.txt +0 -0
{empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: empathy-framework
-Version: 5.0.
+Version: 5.0.3
 Summary: AI collaboration framework with real LLM agent execution, AskUserQuestion tool integration, Socratic agent generation, progressive tier escalation (70-85% cost savings), meta-orchestration, dynamic agent composition (6 patterns), intelligent caching (85% hit rate), semantic workflow discovery, visual workflow editor, MCP integration for Claude Code, and multi-agent orchestration.
 Author-email: Patrick Roebuck <admin@smartaimemory.com>
 Maintainer-email: Smart-AI-Memory <admin@smartaimemory.com>
@@ -170,7 +170,7 @@ License-File: LICENSE
 Requires-Dist: pydantic<3.0.0,>=2.0.0
 Requires-Dist: typing-extensions<5.0.0,>=4.0.0
 Requires-Dist: python-dotenv<2.0.0,>=1.0.0
-Requires-Dist: structlog<
+Requires-Dist: structlog<26.0.0,>=23.0.0
 Requires-Dist: defusedxml<1.0.0,>=0.7.0
 Requires-Dist: rich<14.0.0,>=13.0.0
 Requires-Dist: typer<1.0.0,>=0.9.0
@@ -256,7 +256,7 @@ Requires-Dist: pytest-cov<8.0,>=4.0; extra == "dev"
 Requires-Dist: pytest-xdist<4.0,>=3.5.0; extra == "dev"
 Requires-Dist: pytest-testmon<3.0,>=2.1.0; extra == "dev"
 Requires-Dist: pytest-picked<1.0,>=0.5.0; extra == "dev"
-Requires-Dist: black<
+Requires-Dist: black<27.0,>=24.3.0; extra == "dev"
 Requires-Dist: mypy<2.0,>=1.0; extra == "dev"
 Requires-Dist: ruff<1.0,>=0.1; extra == "dev"
 Requires-Dist: coverage<8.0,>=7.0; extra == "dev"
@@ -341,15 +341,15 @@ Requires-Dist: mkdocs-material<10.0.0,>=9.4.0; extra == "all"
 Requires-Dist: mkdocstrings[python]<1.0.0,>=0.24.0; extra == "all"
 Requires-Dist: mkdocs-with-pdf<1.0.0,>=0.9.3; extra == "all"
 Requires-Dist: pymdown-extensions<11.0,>=10.0; extra == "all"
-Requires-Dist: pytest<
-Requires-Dist: pytest-asyncio<
-Requires-Dist: pytest-cov<
-Requires-Dist: black<
+Requires-Dist: pytest<10.0,>=7.0; extra == "all"
+Requires-Dist: pytest-asyncio<2.0,>=0.21; extra == "all"
+Requires-Dist: pytest-cov<8.0,>=4.0; extra == "all"
+Requires-Dist: black<27.0,>=24.3.0; extra == "all"
 Requires-Dist: mypy<2.0,>=1.0; extra == "all"
 Requires-Dist: ruff<1.0,>=0.1; extra == "all"
 Requires-Dist: coverage<8.0,>=7.0; extra == "all"
 Requires-Dist: bandit<2.0,>=1.7; extra == "all"
-Requires-Dist: pre-commit<
+Requires-Dist: pre-commit<5.0,>=3.0; extra == "all"
 Requires-Dist: httpx<1.0.0,>=0.27.0; extra == "all"
 Requires-Dist: urllib3<3.0.0,>=2.3.0; extra == "all"
 Requires-Dist: aiohttp<4.0.0,>=3.10.0; extra == "all"
@@ -386,12 +386,56 @@ pip install empathy-framework[developer]
 **Timeline:**
 - ✅ **v4.8.0 (Jan 2026):** Deprecation warnings for OpenAI/Google/Ollama providers
 - ✅ **v5.0.0 (Jan 26, 2026):** Non-Anthropic providers removed (BREAKING - COMPLETE)
--
+- ✅ **v5.0.2 (Jan 28, 2026):** Cost optimization suite with batch processing and caching monitoring

 **Migration Guide:** [docs/CLAUDE_NATIVE.md](docs/CLAUDE_NATIVE.md)

 ---

+## What's New in v5.0.2
+
+**💰 50% Cost Savings with Batch API** - Process non-urgent tasks asynchronously:
+
+```bash
+empathy batch submit batch_requests.json            # Submit batch job
+empathy batch status msgbatch_abc123                # Check progress
+empathy batch results msgbatch_abc123 output.json   # Download results
+```
+
+Perfect for: log analysis, report generation, bulk classification, test generation
+
+**📊 Precise Token Counting** - >98% accurate cost tracking:
+
+- Integrated Anthropic's `count_tokens()` API for billing-accurate measurements
+- 3-tier fallback: API → tiktoken (local) → heuristic
+- Cache-aware cost calculation (25% write markup, 90% read discount)
+
+**📈 Cache Performance Monitoring** - Track your 20-30% caching savings:
+
+```bash
+empathy cache stats                  # Show hit rates and cost savings
+empathy cache stats --verbose        # Detailed token metrics
+empathy cache stats --format json    # Machine-readable output
+```
+
+**🧭 Adaptive Routing Analytics** - Intelligent tier recommendations:
+
+```bash
+empathy routing stats <workflow>               # Performance metrics
+empathy routing check --all                    # Tier upgrade recommendations
+empathy routing models --provider anthropic    # Compare models
+```
+
+**🔧 Dashboard Fixes** - All 6 agent coordination patterns now operational:
+- Agent heartbeats displaying correctly
+- Event streaming functional
+- Coordination signals working
+- Approval gates operational
+
+[See Full Changelog](CHANGELOG.md#502---2026-01-28) | [Batch API Guide](docs/BATCH_API_GUIDE.md) | [User API Docs](docs/USER_API_DOCUMENTATION.md)
+
+---
+
 ## What's New in v4.9.0

 **⚡ 18x Faster Performance** - Massive performance gains through Phase 2 optimizations:
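The 3-tier token-counting fallback described in the v5.0.2 notes above (API → tiktoken → heuristic) can be pictured roughly as below. This is a minimal sketch rather than the package's actual `empathy_llm_toolkit/utils/tokens.py`; only the `count_tokens(text, model=..., use_api=...)` call shape is taken from the providers.py changes further down, while the Anthropic SDK usage, the `cl100k_base` encoding, and the characters-per-token heuristic are assumptions for illustration.

```python
# Hedged sketch of a 3-tier token counter: Anthropic's count_tokens endpoint,
# then tiktoken locally, then a rough ~4-characters-per-token heuristic.
# Not the package's implementation; assumes ANTHROPIC_API_KEY is configured.

def count_tokens(text: str, model: str = "claude-sonnet-4-5-20250929", use_api: bool = True) -> int:
    if use_api:
        try:
            import anthropic

            client = anthropic.Anthropic()
            response = client.messages.count_tokens(
                model=model,
                messages=[{"role": "user", "content": text}],
            )
            return response.input_tokens  # billing-accurate count from the API
        except Exception:
            pass  # fall through to local estimation

    try:
        import tiktoken

        encoding = tiktoken.get_encoding("cl100k_base")  # approximation for Claude tokenization
        return len(encoding.encode(text))
    except Exception:
        return max(1, len(text) // 4)  # last-resort heuristic
```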
{empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-empathy_framework-5.0.
+empathy_framework-5.0.3.dist-info/licenses/LICENSE,sha256=IJ9eeI5KSrD5P7alsn7sI_6_1bDihxBA5S4Sen4jf2k,4937
 empathy_healthcare_plugin/__init__.py,sha256=4NioL1_86UXzkd-QNkQZUSZ8rKTQGSP0TC9VXP32kQs,295
 empathy_healthcare_plugin/monitors/__init__.py,sha256=Udp8qfZR504QAq5_eQjvtIaE7v06Yguc7nuF40KllQc,196
 empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py,sha256=MWE5t8tW9HWZn_SNo-inx8-0nhdTNGhbcB8ZeDWyXa0,11648
@@ -22,7 +22,7 @@ empathy_llm_toolkit/levels.py,sha256=Hu9G83iY-BRyjMJKNoDNYZ9xFD0Y0LIn_XEBc5aRWx0
 empathy_llm_toolkit/pattern_confidence.py,sha256=6plkrdDTb3GcjeNxohJlpeHnNWFRRcj5U76D2hvLSGA,14153
 empathy_llm_toolkit/pattern_resolver.py,sha256=VLTeGvEDfQyDwNy5u-t18A5c0BcCpp1FqdExxfqZSlw,9292
 empathy_llm_toolkit/pattern_summary.py,sha256=gA_rCXgfOT1Kp6FP5f1UZqXPp8_mkYXlAfTxc2lhOnc,12286
-empathy_llm_toolkit/providers.py,sha256=
+empathy_llm_toolkit/providers.py,sha256=v-HcN-v7CykKZmgzLdn2iyjTK5-dtCTBsdpWauOG24A,34852
 empathy_llm_toolkit/session_status.py,sha256=a8QPYf8Sp7jnzHkeRNOyXqoj0QwwoVwkvxPXW9qdvgc,25722
 empathy_llm_toolkit/state.py,sha256=diLXx8QKzfucVDhbKNWej40JXQFmIeM1Qvvzg92pmjU,8030
 empathy_llm_toolkit/agent_factory/__init__.py,sha256=JlusqsYjl_1FqW75h-Uikw2al6v56FAfGSHmQ6gMOk0,1759
@@ -93,8 +93,8 @@ empathy_llm_toolkit/security/secrets_detector_example.py,sha256=LDMJuINpRI-QtWnC
 empathy_llm_toolkit/security/secure_memdocs.py,sha256=x6won-u3tiBkv5ZPVRpSJKTs1mQTZ79jWAEkT8iMxtc,40230
 empathy_llm_toolkit/security/secure_memdocs_example.py,sha256=4UwbaM5X-c93p4_3pz6LKaeNYzfxptzbsYY_-ZuG0A0,8359
 empathy_llm_toolkit/utils/__init__.py,sha256=b2Juy_nuMJ7Ax9tck725V7i3s5shaMSfqZZbiJ2-rOc,187
-empathy_llm_toolkit/utils/tokens.py,sha256=
-empathy_os/__init__.py,sha256=
+empathy_llm_toolkit/utils/tokens.py,sha256=vu5aggQYmoESIG1dkFahfX6hs6QiyPPF0gMTEp9mGak,11220
+empathy_os/__init__.py,sha256=1k9dqEd8YT2IzC6SYPiq1tCWtdLz3ahC_zGL6fdV3Bw,12061
 empathy_os/agent_monitoring.py,sha256=s4seLC_J4AtQ3PYWrRPO8YHM-Fbm0Q36kPEdlTHf2HI,13375
 empathy_os/cache_monitor.py,sha256=lcBqODhYY9iPaH35PWkOSgyMavdvXneHv9F57dPmjnc,11190
 empathy_os/cache_stats.py,sha256=rWJPBNFEfhuLwKYKy89D_Qa9GPIyVso2jdCKp_cdJhI,10232
@@ -122,7 +122,6 @@ empathy_os/redis_memory.py,sha256=MYhW_M41AoWhxxh8YggWUHxZaUoQZ7XxqeBB2qDqyww,23
 empathy_os/templates.py,sha256=B5fzlc6wU3lwdaKK4ZUdytqOYrM6TEnFteZ0nCSp_EI,17220
 empathy_os/tier_recommender.py,sha256=oYOKhnrTG2sqYsDvojQyaKuyC4ac_weSz4Xv0j0TuP8,14305
 empathy_os/trust_building.py,sha256=z2ZsQWK5QjX6d7pxFWrFD54J1keUguTog6YztZJVPKI,19180
-empathy_os/vscode_bridge 2.py,sha256=SgFmWFCoAoPww-g2hsRC2VeqAMTqUmXXxJ3hglykxP4,4967
 empathy_os/vscode_bridge.py,sha256=SgFmWFCoAoPww-g2hsRC2VeqAMTqUmXXxJ3hglykxP4,4967
 empathy_os/workflow_commands.py,sha256=tps6norBiw7Q2hV0qdqwdX4B7Kedt_FwYBQWhc2oEYg,25482
 empathy_os/adaptive/__init__.py,sha256=07m_Gsdxmm9mDBjJRFz7a8wZK-Tdd2QFRQOUw6yCc1w,342
@@ -137,23 +136,28 @@ empathy_os/cli/__init__.py,sha256=7PFURX1O4_sX14kWvNrnAV1Ndeixrx_nI8IN8eLQpQs,49
 empathy_os/cli/__main__.py,sha256=md0GsFpiTgifmJ0MJQ_E6Daq9JCxyCUgOUJRdJ69Ews,215
 empathy_os/cli/core.py,sha256=USK7MwpSqxKcMP0rxtf9qqTgQ1GMmcPpXBw9uadQ3sM,883
 empathy_os/cli/commands/__init__.py,sha256=GYdoNv80KS2p7bCThuoRbhiLmIZW6ddPqPEFEx567nQ,35
+empathy_os/cli/commands/batch.py,sha256=yyqKz_W9qchKlVFkCshKziFYZi6-1RCC0yp9j7tPCVk,7951
+empathy_os/cli/commands/cache.py,sha256=1KSX_4dv1ZfwtfGxhb5kop-8mSPBCkFFFAI9BCfh9Wk,8188
 empathy_os/cli/commands/help.py,sha256=AerAgAl0zW8YCCQIkxxc9p9K_wAskDGNhq211elPY_k,10082
 empathy_os/cli/commands/info.py,sha256=SFwOAhHjIoHRguqbMEPT0lG7bbt72Zv5AFpZjJPfHm8,4794
-empathy_os/cli/commands/inspect.py,sha256=
+empathy_os/cli/commands/inspect.py,sha256=DgqfA5uGEN94ROcoYja3Wa4KHCYPb0-df9N53pIUV24,16228
 empathy_os/cli/commands/inspection.py,sha256=1rszYfpj1zcYZsbKhJztQlwO1anF0Pgc5PGQ6M9Knk8,1781
 empathy_os/cli/commands/memory.py,sha256=1NTgfIHGdjKwsU5fAZZwZI5HUN7b4AE62_UF9psw3NE,1361
-empathy_os/cli/commands/metrics.py,sha256=
+empathy_os/cli/commands/metrics.py,sha256=pX41tJpvRu-rGip2drdCkgZ0LDxOd0DVGXuV_hEY7ng,3176
 empathy_os/cli/commands/orchestrate.py,sha256=ifqEGUGaz8aF1mW3muGrbaJRqqJodcBXIyZDIIf1WPA,6553
 empathy_os/cli/commands/patterns.py,sha256=EVLX4akwGdkZOvJEQZDUojIAa5yK_50rE-TTFzEs-9w,7938
 empathy_os/cli/commands/profiling.py,sha256=n_1cyLwwguSHJUj6NKDr5gBuP1gSBLnkQ_ccxUmAkNY,5706
 empathy_os/cli/commands/provider.py,sha256=Mg8SjNsWKfhhdEZ0RvyfEaR3yiYcGj5L6OE5YFZRRFw,3095
+empathy_os/cli/commands/routing.py,sha256=2Z19yuWNeIbJeVDKrLzsNLkDn5m6gfYDA_TXbdVLJuo,9905
 empathy_os/cli/commands/setup.py,sha256=cF_3u7lrxfoyTr-ucu4D4Gc01EqtAQSxrwwU32tDsA0,3456
 empathy_os/cli/commands/status.py,sha256=ZIJx4Y0iMVFW_NDnoio1R6NBcXXS543QD9MwdaPA5B0,8309
 empathy_os/cli/commands/sync.py,sha256=5ABcmJ--puRXEH10yXBMOnqaue1miEPCzYhIbov5pq8,5094
 empathy_os/cli/commands/tier.py,sha256=3l3j7m83lcaKUFClTb2iU5vkH06zC8-eLGrt-KHdz0M,3745
 empathy_os/cli/commands/utilities.py,sha256=e4izGTS_uOy4YJDPwPbQnQ20p9M2v3JUyqAQEPwitMc,3737
-empathy_os/cli/commands/workflow.py,sha256=
-empathy_os/cli/parsers/__init__.py,sha256=
+empathy_os/cli/commands/workflow.py,sha256=UGmZcUVMDL2Dvg3umbsUYn48s0D73Cxy4XMIF3SgPzQ,23183
+empathy_os/cli/parsers/__init__.py,sha256=Oihx1jb5wN_4GS3RxZiEYhQhglI44idOOlFJs9a6e5g,1677
+empathy_os/cli/parsers/batch.py,sha256=wSutRqerJZxUNVNIotU9qtI8EGP6IzVihEZUViCItCQ,3236
+empathy_os/cli/parsers/cache.py,sha256=_SpFE48JM1xfAvb_ndqYNR0bP4EyMb_T_PlmIsIHf5o,1797
 empathy_os/cli/parsers/help.py,sha256=I3QFDVABxZpYDCJ52GNWwfQlKEdV72Yr9A8spPHqUqU,1642
 empathy_os/cli/parsers/info.py,sha256=525oPlxKkOJk8tra_a2ga9DKm6VtejQUBWzBCDQK1v4,989
 empathy_os/cli/parsers/inspect.py,sha256=DekJWgmrD_rVjUxvS0WI_Lx55DqquXZn_T0J3mC53M0,2427
@@ -161,6 +165,7 @@ empathy_os/cli/parsers/metrics.py,sha256=dbMIbmsrGxRvaG6kMh8LB3IWtmD_R4u6W131skV
 empathy_os/cli/parsers/orchestrate.py,sha256=kACmZExVIJ_F1cBRGOYtSxf8AWy_k1n3GNTuDIsxNVY,1769
 empathy_os/cli/parsers/patterns.py,sha256=4Jy7A_iZWJRnIdPUi6vz2tl2_agkhSgpNv8LlEoUScI,2374
 empathy_os/cli/parsers/provider.py,sha256=fh2YGIQ0LOP7Lb2LaphOQFO01rWDxhvCprXYWLy7YXE,1115
+empathy_os/cli/parsers/routing.py,sha256=YJ4W_UISXCOpvb7xp4GZUbJhBpKDg5ew7izJSzq8B6E,2884
 empathy_os/cli/parsers/setup.py,sha256=K3bghLEkNijDAAvmUfBnDFOsG9J9Hpmyi7EruFalQaY,1105
 empathy_os/cli/parsers/status.py,sha256=Z_r-C92hcrJ3iBmSok6iEkZfYDRvs7_3WD0oQQULzDw,2370
 empathy_os/cli/parsers/sync.py,sha256=fYXJDJTE1_JYdc15y7bHJOalCnB0Yg9ZkQQPPTUMUIo,884
@@ -174,7 +179,7 @@ empathy_os/config/xml_config.py,sha256=iXFJvGPlSoxRdARlxx-7YIdxb8eRiFHVRWM6-HwOW
 empathy_os/dashboard/__init__.py,sha256=CwGq_IQS5H3OjY0NabAihvxohns8vsw0jmEnq3p1UGg,1413
 empathy_os/dashboard/app.py,sha256=taCHkhSAsnna_joClhLTai5ayevYbh67nJDpOYOU-EA,16147
 empathy_os/dashboard/simple_server.py,sha256=L2uSgCJtiHS7TZSu-iEXpImNTAW4w3zNVg8YNsj5vPs,14667
-empathy_os/dashboard/standalone_server.py,sha256=
+empathy_os/dashboard/standalone_server.py,sha256=rsJdlHZii3UUbA4Yi3H8KDmc77RGqBbyZUHIOKvFE2M,20862
 empathy_os/hot_reload/README.md,sha256=FnQWSX-dfTIJvXHjjiWRWBjMK2DR-eyOfDHJlGIzm0o,10406
 empathy_os/hot_reload/__init__.py,sha256=Aos2tLSKRzOLr5zRomLyzrz6qDRd1_PlinI3vrJcCTo,1642
 empathy_os/hot_reload/config.py,sha256=Lk_5bShouV-Z_atOvgcoPas4wwXwfHTKK9eSC60C9Uk,2290
@@ -215,6 +220,7 @@ empathy_os/meta_workflows/session_context.py,sha256=MNx_P7Sv-Tt5YXaX-HH2HUc_f2sc
 empathy_os/meta_workflows/template_registry.py,sha256=jEvQNZtQ4UhitBuyml1dDjQJNLoT-BvPIFgvWSuw0xs,7485
 empathy_os/meta_workflows/workflow.py,sha256=cKzaIWcVuRMRk2YoXJCMnl_4y5XUpD6b-uVStAlUqe8,36007
 empathy_os/metrics/__init__.py,sha256=b0lkly5Fz89AvvGqV6lLYx8hWlmttL1ZCPuWlHjwXxY,369
+empathy_os/metrics/collector.py,sha256=WBrUklvZ9T80a5iz1XyRPvmteMOpftqBYHdBb4DpZao,766
 empathy_os/metrics/prompt_metrics.py,sha256=C9N8uIynTP8XiGcwUeOoJRU48vLiS3uK0SKFmVefalo,6518
 empathy_os/models/__init__.py,sha256=BixzbjDbweDr7siPxAKGvoFdYY5M3hxoml7pIiVSpPE,3485
 empathy_os/models/__main__.py,sha256=V7uZJ_XT__ROZsmnEI4-U7hNsOH1oqzfnWb2o6xJ0g4,296
@@ -227,7 +233,7 @@ empathy_os/models/provider_config.py,sha256=RMGjWqjOCSUGEtudQU7uaTHQj1RgZFR9BlKd
 empathy_os/models/registry.py,sha256=L9GZ5hQvCcGDcaorOTW7GX4Ndu1wnqxXtwSjQ5cT0_4,15555
 empathy_os/models/tasks.py,sha256=mvxFZiiRgUGCHYdJE316Zp8PGhN5D1Ctx9Og3jz_Zy8,10396
 empathy_os/models/telemetry.py,sha256=piNLJXmLvpWQQu6gFHcUpaLEaPmFzvzKTmO1TGEbfOU,52532
-empathy_os/models/token_estimator.py,sha256=
+empathy_os/models/token_estimator.py,sha256=15RsTew2aqA6yNuaUFDLX_CMjkIjRELWeWvlkDV0G4g,13488
 empathy_os/models/validation.py,sha256=JYOXAy9Bj50uO9LHDbBRBK73sue8TLVVRtp6vhzp7ms,9152
 empathy_os/monitoring/__init__.py,sha256=efAzUX0DYeUTGEES-MV6jI4iUYmhH-RkQYnVLnjSl-M,1507
 empathy_os/monitoring/alerts.py,sha256=NZUbBXC_a5TD235cs3G-XKYxLIBpVCENyTW3zXjjUss,30281
@@ -297,12 +303,12 @@ empathy_os/socratic/success.py,sha256=wZoHBkfkzmZwwbEMNdUj8cWw4nLCyKFtGNcWHJDoV9
 empathy_os/socratic/visual_editor.py,sha256=Nk2vJaTKtcwhHH04KTzRnAVyae6AeA03je2-y74d1kk,27912
 empathy_os/socratic/web_ui.py,sha256=Sg7pSS1043ecVt_yYpSLnv_6pvGnt6aM742fXq6uJ7M,25375
 empathy_os/telemetry/__init__.py,sha256=DpNi4Eglyj7mAA4XT3GFpidnvvbVNC7Qo1Te9Q5wQ44,1295
-empathy_os/telemetry/agent_coordination.py,sha256=
-empathy_os/telemetry/agent_tracking.py,sha256=
-empathy_os/telemetry/approval_gates.py,sha256=
+empathy_os/telemetry/agent_coordination.py,sha256=GhiPcSPzgQ032b6r1c86Ktf_2TcH63e23wkxLMm_fME,15898
+empathy_os/telemetry/agent_tracking.py,sha256=cA_vqLSmmqVkeaiZLHLn38BoGeB1Y8VE7mRIU1dWClU,11672
+empathy_os/telemetry/approval_gates.py,sha256=1h3vZyD3rBdVQ2og3kTsXtDFm52sf6w5j1XU4lPEYoM,19300
 empathy_os/telemetry/cli.py,sha256=vuWuAgq8VyZBogqnD0DHI76MCg8WLVsjyTDKM1uh1sY,69474
-empathy_os/telemetry/event_streaming.py,sha256
-empathy_os/telemetry/feedback_loop.py,sha256=
+empathy_os/telemetry/event_streaming.py,sha256=HSLBTw2svys-JKV530ht1CcFP9Ix038qHwbS424vY4g,13223
+empathy_os/telemetry/feedback_loop.py,sha256=nKrm7tYKYXbmLZew2M4cETRhDbv7qerK8WZMsULhYx8,19314
 empathy_os/telemetry/usage_tracker.py,sha256=Be409JSeweps3GEN3k66u_0I1594KTWoH_bRzelih9U,21184
 empathy_os/test_generator/__init__.py,sha256=lSck9qlC32AO8qoQldk3UjjTRDPdAUgIOHGa-WvzCqI,919
 empathy_os/test_generator/__main__.py,sha256=YY_HE1xg4zKZkHHAd6sSzWvJCvLFOtmpawCrNlGjWAc,345
@@ -321,7 +327,7 @@ empathy_os/workflow_patterns/registry.py,sha256=0U_XT0hdQ5fLHuEJlrvzjaCBUyeWDA67
 empathy_os/workflow_patterns/structural.py,sha256=v1wbBU0pCQQKdNhPXM_fuPO8ptQCbEC_8X-OKgfhrk8,9432
 empathy_os/workflows/__init__.py,sha256=iaOjOtiJzfLBKey5W_v1LT6o-yjrsWLbQNglH8tDN5I,19594
 empathy_os/workflows/base.py,sha256=5pKLwkg6qULFhT2QFY5nMuxUnYLq__ZGL5ogNQcmDNA,99363
-empathy_os/workflows/batch_processing.py,sha256=
+empathy_os/workflows/batch_processing.py,sha256=hMODlPHV74iorBz9_405LqKYhk-3twIgmUyMRwATqGQ,11600
 empathy_os/workflows/bug_predict.py,sha256=x49KCzdsa6t1Em0FBpFCigOj0FB-zhydW-Pc2jTVeYE,38378
 empathy_os/workflows/builder.py,sha256=rRXevtS8v1FAGG91yhpSq6_dx5rvK_sZrF9ekdW40fE,8655
 empathy_os/workflows/caching.py,sha256=y6Ws0KKCQ_R6D6PTbpc8dQRjb3tG4U2-KcwJvxFi07g,8134
@@ -375,23 +381,14 @@ empathy_os/workflows/keyboard_shortcuts/parsers.py,sha256=aws4HSjqBOrl-DQEOV9WeJ
 empathy_os/workflows/keyboard_shortcuts/prompts.py,sha256=gcV2F2bAMjZUrbB13lOI4ixXzXm2TNWEZ4VbPhC7ITw,9164
 empathy_os/workflows/keyboard_shortcuts/schema.py,sha256=MwvM63J9WTO6nqtwes5A04HH1dTa9XhJlD0SbFhsS5E,5806
 empathy_os/workflows/keyboard_shortcuts/workflow.py,sha256=FOEQwc8IR0PEYim05zjR-uIlw6oPZHW9jQZ8skxXp0c,17678
-empathy_os/workflows/progressive/README 2.md,sha256=ngn3ZC48LW-ON5Vow90dW4UvEWeOKuU7V-W4JdudqJo,13793
 empathy_os/workflows/progressive/README.md,sha256=ngn3ZC48LW-ON5Vow90dW4UvEWeOKuU7V-W4JdudqJo,13793
-empathy_os/workflows/progressive/__init__ 2.py,sha256=l18JX30ONk8H08Wm210k1W3AKat2tVzI840JqlzTBpU,2203
 empathy_os/workflows/progressive/__init__.py,sha256=qkPVjO_MtHjxVcHMr5jbU9NMZTxVCGHXVD3EkngyCso,2179
-empathy_os/workflows/progressive/cli 2.py,sha256=WIFxTBUsCL2hvKKoPFxYUgfhRFjMm-HVUjRp3Y-pvXA,6305
 empathy_os/workflows/progressive/cli.py,sha256=jbl5jSk3wlbjGc00Ao06OO7aU53AZuVZVKJpJJAajX4,6122
-empathy_os/workflows/progressive/core 2.py,sha256=891OttvWkFokBRVzvqVUxwpcNlgc7edlVYqGUCkn0vs,16101
 empathy_os/workflows/progressive/core.py,sha256=2FzIsy0xKyP91-aMTGfERbX5Mow5CSlfR9RUmHU4GeM,16070
-empathy_os/workflows/progressive/orchestrator 2.py,sha256=FsLcoRC8jujRcOV87PyN-0jzlLvStIQQqMFisxWWg0I,26786
 empathy_os/workflows/progressive/orchestrator.py,sha256=LssTqdy4ufqhgpgS3QNZEdpeSkdg-MgpFyhW5zCJmAY,27733
-empathy_os/workflows/progressive/reports 2.py,sha256=F2p3HyInGH8fT9-e_BsOVcgeQBtOugYsSyBn49yhhmc,17588
 empathy_os/workflows/progressive/reports.py,sha256=VgymV6v7owodELf_QLQTwqGOEZjaFKclKxeQ0-vow0k,17631
-empathy_os/workflows/progressive/telemetry 2.py,sha256=5ZjCmeGjtfESejocWdtvnm5iCTAQv5_3NRTYmuoIbVs,9427
 empathy_os/workflows/progressive/telemetry.py,sha256=DiDTrkeKDjsWJnA57UrNfhbSG6qv9LaTP2wqHPcRscA,9398
-empathy_os/workflows/progressive/test_gen 2.py,sha256=rNjjrQ8AFoXRe99X1Yk020gX6tcTxLh5pGMXXho8XRs,15802
 empathy_os/workflows/progressive/test_gen.py,sha256=wdpjx-Tlz-dWqOwHwoblbgEABr7EjTRV2faXMTad1M0,15759
-empathy_os/workflows/progressive/workflow 2.py,sha256=bGXKmB9UFLfDw5c9MsW6Zw4GSbknCP-9HnaIEX36nhY,21597
 empathy_os/workflows/progressive/workflow.py,sha256=FD3MzRSMV1n_c1S8jSlLLIrcewcus6Niyml6-EMdk_E,21146
 empathy_software_plugin/SOFTWARE_PLUGIN_README.md,sha256=XjvK2TXoI5nsgEZqh4RQIAxBiGVpvbXihIu7zF8KNYk,1399
 empathy_software_plugin/__init__.py,sha256=OceDhLoQrjD0jTPlI4418608l97OT48u4SCL5yEDPQc,309
@@ -403,8 +400,8 @@ workflow_scaffolding/__init__.py,sha256=UpX5vjjjPjIaAKyIV1D4GxJzLUZy5DzdzgSkePYM
 workflow_scaffolding/__main__.py,sha256=0qspuNoadTDqyskXTlT8Sahqau-XIxN35NHTSGVW6z4,236
 workflow_scaffolding/cli.py,sha256=yLgvMsPbqI-LHt7UKXMuj-Dc0-44EqWiChldysrw1YQ,6763
 workflow_scaffolding/generator.py,sha256=9eEmm324kVzf8BCtXEML090pyke06o0U7aGATzZIaPM,8869
-empathy_framework-5.0.
-empathy_framework-5.0.
-empathy_framework-5.0.
-empathy_framework-5.0.
-empathy_framework-5.0.
+empathy_framework-5.0.3.dist-info/METADATA,sha256=wyKjpZD6hL70hezsVORRmmaRUR3gkY8TjTl5xaJ0Psk,32879
+empathy_framework-5.0.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+empathy_framework-5.0.3.dist-info/entry_points.txt,sha256=Ox9iu_2Fx66x5fyZ5L1NJrrFFekD-BZJIudHNzKwYPs,1565
+empathy_framework-5.0.3.dist-info/top_level.txt,sha256=YJNBYFE2u5f2XnveFqYO-rZDcHR_aMOHAiIIf9tl72c,102
+empathy_framework-5.0.3.dist-info/RECORD,,
empathy_llm_toolkit/providers.py
CHANGED
@@ -322,6 +322,93 @@ class AnthropicProvider(BaseLLMProvider):
             },
         )

+    def estimate_tokens(self, text: str) -> int:
+        """Estimate token count using accurate token counter (overrides base class).
+
+        Uses tiktoken for fast local estimation (~98% accurate).
+        Falls back to heuristic if tiktoken unavailable.
+
+        Args:
+            text: Text to count tokens for
+
+        Returns:
+            Estimated token count
+        """
+        try:
+            from .utils.tokens import count_tokens
+
+            return count_tokens(text, model=self.model, use_api=False)
+        except ImportError:
+            # Fallback to base class heuristic if utils not available
+            return super().estimate_tokens(text)
+
+    def calculate_actual_cost(
+        self,
+        input_tokens: int,
+        output_tokens: int,
+        cache_creation_tokens: int = 0,
+        cache_read_tokens: int = 0,
+    ) -> dict[str, Any]:
+        """Calculate actual cost based on precise token counts.
+
+        Includes Anthropic prompt caching cost adjustments:
+        - Cache writes: 25% markup over standard input pricing
+        - Cache reads: 90% discount from standard input pricing
+
+        Args:
+            input_tokens: Regular input tokens (not cached)
+            output_tokens: Output tokens
+            cache_creation_tokens: Tokens written to cache
+            cache_read_tokens: Tokens read from cache
+
+        Returns:
+            Dictionary with cost breakdown:
+            - base_cost: Cost for regular input/output tokens
+            - cache_write_cost: Cost for cache creation (if any)
+            - cache_read_cost: Cost for cache reads (if any)
+            - total_cost: Total cost including all components
+            - savings: Amount saved by cache reads vs. full price
+
+        Example:
+            >>> provider = AnthropicProvider(api_key="...")
+            >>> cost = provider.calculate_actual_cost(
+            ...     input_tokens=1000,
+            ...     output_tokens=500,
+            ...     cache_read_tokens=10000
+            ... )
+            >>> cost["total_cost"]
+            0.0105  # Significantly less than without cache
+        """
+        # Get pricing for this model
+        model_info = self.get_model_info()
+        input_price_per_million = model_info["cost_per_1m_input"]
+        output_price_per_million = model_info["cost_per_1m_output"]
+
+        # Base cost (non-cached tokens)
+        base_cost = (input_tokens / 1_000_000) * input_price_per_million
+        base_cost += (output_tokens / 1_000_000) * output_price_per_million
+
+        # Cache write cost (25% markup)
+        cache_write_price = input_price_per_million * 1.25
+        cache_write_cost = (cache_creation_tokens / 1_000_000) * cache_write_price
+
+        # Cache read cost (90% discount = 10% of input price)
+        cache_read_price = input_price_per_million * 0.1
+        cache_read_cost = (cache_read_tokens / 1_000_000) * cache_read_price
+
+        # Calculate savings from cache reads
+        full_price_for_cached = (cache_read_tokens / 1_000_000) * input_price_per_million
+        savings = full_price_for_cached - cache_read_cost
+
+        return {
+            "base_cost": round(base_cost, 6),
+            "cache_write_cost": round(cache_write_cost, 6),
+            "cache_read_cost": round(cache_read_cost, 6),
+            "total_cost": round(base_cost + cache_write_cost + cache_read_cost, 6),
+            "savings": round(savings, 6),
+            "currency": "USD",
+        }
+

 class AnthropicBatchProvider:
     """Provider for Anthropic Batch API (50% cost reduction).
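To make the cache adjustments above concrete, here is the arithmetic from `calculate_actual_cost()` applied to the docstring's inputs, using assumed illustrative rates of $3 per million input tokens and $15 per million output tokens. Real rates come from `get_model_info()` and differ by model, so exact totals (including the docstring's `0.0105` figure) depend on that pricing.

```python
# Worked example of the cache-aware formula above, with assumed pricing.
input_tokens, output_tokens = 1_000, 500
cache_creation_tokens, cache_read_tokens = 0, 10_000
input_price, output_price = 3.0, 15.0  # USD per million tokens (assumption)

base_cost = input_tokens / 1e6 * input_price + output_tokens / 1e6 * output_price  # 0.003 + 0.0075 = 0.0105
cache_write_cost = cache_creation_tokens / 1e6 * (input_price * 1.25)              # 25% markup on writes -> 0.0
cache_read_cost = cache_read_tokens / 1e6 * (input_price * 0.10)                   # 90% discount on reads -> 0.003
savings = cache_read_tokens / 1e6 * input_price - cache_read_cost                  # 0.030 - 0.003 = 0.027 saved

total_cost = base_cost + cache_write_cost + cache_read_cost                        # 0.0135 under these assumed rates
print(f"total ${total_cost:.4f}, saved ${savings:.4f} vs. uncached input")
```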
@@ -370,7 +457,8 @@ class AnthropicBatchProvider:
         """Create a batch job.

         Args:
-            requests: List of request dicts with 'custom_id'
+            requests: List of request dicts with 'custom_id' and 'params' containing message creation parameters.
+                Format: [{"custom_id": "id1", "params": {"model": "...", "messages": [...], "max_tokens": 1024}}]
             job_id: Optional job identifier for tracking (unused, for API compatibility)

         Returns:
@@ -384,22 +472,46 @@ class AnthropicBatchProvider:
             >>> requests = [
             ...     {
             ...         "custom_id": "task_1",
-            ...         "
-            ...
-            ...
+            ...         "params": {
+            ...             "model": "claude-sonnet-4-5-20250929",
+            ...             "messages": [{"role": "user", "content": "Test"}],
+            ...             "max_tokens": 1024
+            ...         }
             ...     }
             ... ]
             >>> batch_id = provider.create_batch(requests)
             >>> print(f"Batch created: {batch_id}")
-            Batch created:
+            Batch created: msgbatch_abc123
         """
         if not requests:
             raise ValueError("requests cannot be empty")

+        # Validate and convert old format to new format if needed
+        formatted_requests = []
+        for req in requests:
+            if "params" not in req:
+                # Old format: convert to new format with params wrapper
+                formatted_req = {
+                    "custom_id": req.get("custom_id", f"req_{id(req)}"),
+                    "params": {
+                        "model": req.get("model", "claude-sonnet-4-5-20250929"),
+                        "messages": req.get("messages", []),
+                        "max_tokens": req.get("max_tokens", 4096),
+                    },
+                }
+                # Copy other optional params
+                for key in ["temperature", "system", "stop_sequences"]:
+                    if key in req:
+                        formatted_req["params"][key] = req[key]
+                formatted_requests.append(formatted_req)
+            else:
+                formatted_requests.append(req)
+
         try:
-
+            # Use correct Message Batches API endpoint
+            batch = self.client.messages.batches.create(requests=formatted_requests)
             self._batch_jobs[batch.id] = batch
-            logger.info(f"Created batch {batch.id} with {len(
+            logger.info(f"Created batch {batch.id} with {len(formatted_requests)} requests")
             return batch.id
         except Exception as e:
             logger.error(f"Failed to create batch: {e}")
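Since `create_batch()` above now normalizes both request shapes, callers can pass either the legacy flat dict or the Message Batches `params` wrapper. A brief usage sketch follows; the model id mirrors the default used in the diff, and `provider` is assumed to be an already-constructed `AnthropicBatchProvider` (construction is outside this diff).

```python
# Sketch: both request shapes are accepted by create_batch() shown above.
legacy_request = {  # flat shape; wrapped into "params" automatically
    "custom_id": "summarize_log_1",
    "model": "claude-sonnet-4-5-20250929",
    "messages": [{"role": "user", "content": "Summarize this log: ..."}],
    "max_tokens": 512,
    "temperature": 0.0,  # optional keys (temperature/system/stop_sequences) are copied into params
}

native_request = {  # Message Batches shape; passed through unchanged
    "custom_id": "summarize_log_2",
    "params": {
        "model": "claude-sonnet-4-5-20250929",
        "messages": [{"role": "user", "content": "Summarize this log: ..."}],
        "max_tokens": 512,
    },
}

batch_id = provider.create_batch([legacy_request, native_request])
```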
@@ -412,18 +524,20 @@ class AnthropicBatchProvider:
             batch_id: Batch job ID

         Returns:
-
-            - "
-            - "
-            - "
+            MessageBatch object with processing_status field:
+            - "in_progress": Batch is being processed
+            - "canceling": Cancellation initiated
+            - "ended": Batch processing ended (check request_counts for success/errors)

         Example:
-            >>> status = provider.get_batch_status("
-            >>> print(status.
-
+            >>> status = provider.get_batch_status("msgbatch_abc123")
+            >>> print(status.processing_status)
+            in_progress
+            >>> print(f"Succeeded: {status.request_counts.succeeded}")
         """
         try:
-
+            # Use correct Message Batches API endpoint
+            batch = self.client.messages.batches.retrieve(batch_id)
             self._batch_jobs[batch_id] = batch
             return batch
         except Exception as e:
@@ -437,25 +551,37 @@ class AnthropicBatchProvider:
             batch_id: Batch job ID

         Returns:
-            List of result dicts
+            List of result dicts. Each dict contains:
+            - custom_id: Request identifier
+            - result: Either {"type": "succeeded", "message": {...}} or {"type": "errored", "error": {...}}

         Raises:
-            ValueError: If batch
+            ValueError: If batch has not ended processing
             RuntimeError: If API call fails

         Example:
-            >>> results = provider.get_batch_results("
+            >>> results = provider.get_batch_results("msgbatch_abc123")
             >>> for result in results:
-            ...
+            ...     if result['result']['type'] == 'succeeded':
+            ...         message = result['result']['message']
+            ...         print(f"{result['custom_id']}: {message.content[0].text}")
+            ...     else:
+            ...         error = result['result']['error']
+            ...         print(f"{result['custom_id']}: Error {error['type']}")
         """
         status = self.get_batch_status(batch_id)

-
-
+        # Check processing_status instead of status
+        if status.processing_status != "ended":
+            raise ValueError(
+                f"Batch {batch_id} has not ended processing (status: {status.processing_status})"
+            )

         try:
-
-
+            # Use correct Message Batches API endpoint
+            # results() returns an iterator, convert to list
+            results_iterator = self.client.messages.batches.results(batch_id)
+            return list(results_iterator)
         except Exception as e:
             logger.error(f"Failed to get batch results for {batch_id}: {e}")
             raise RuntimeError(f"Failed to get batch results: {e}") from e
@@ -474,15 +600,15 @@ class AnthropicBatchProvider:
             timeout: Maximum wait time in seconds (default: 86400 = 24 hours)

         Returns:
-            Batch results when
+            Batch results when processing ends

         Raises:
             TimeoutError: If batch doesn't complete within timeout
-            RuntimeError: If batch processing
+            RuntimeError: If batch had errors during processing

         Example:
             >>> results = await provider.wait_for_batch(
-            ...     "
+            ...     "msgbatch_abc123",
             ...     poll_interval=300,  # Check every 5 minutes
             ... )
             >>> print(f"Batch completed: {len(results)} results")
@@ -493,22 +619,36 @@ class AnthropicBatchProvider:
         while True:
             status = self.get_batch_status(batch_id)

-            if
-
-
+            # Check if batch processing has ended
+            if status.processing_status == "ended":
+                # Check request counts to see if there were errors
+                counts = status.request_counts
+                logger.info(
+                    f"Batch {batch_id} ended: "
+                    f"{counts.succeeded} succeeded, {counts.errored} errored, "
+                    f"{counts.canceled} canceled, {counts.expired} expired"
+                )

-
-
-
-                    raise RuntimeError(f"Batch {batch_id} failed: {error_msg}")
+                # Return results even if some requests failed
+                # The caller can inspect individual results for errors
+                return self.get_batch_results(batch_id)

             # Check timeout
             elapsed = (datetime.now() - start_time).total_seconds()
             if elapsed > timeout:
                 raise TimeoutError(f"Batch {batch_id} did not complete within {timeout}s")

-            # Log progress
-
+            # Log progress with request counts
+            try:
+                counts = status.request_counts
+                logger.debug(
+                    f"Batch {batch_id} status: {status.processing_status} "
+                    f"(processing: {counts.processing}, elapsed: {elapsed:.0f}s)"
+                )
+            except AttributeError:
+                logger.debug(
+                    f"Batch {batch_id} status: {status.processing_status} (elapsed: {elapsed:.0f}s)"
+                )

             # Wait before next poll
             await asyncio.sleep(poll_interval)
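Putting the batch methods together, an end-to-end flow might look like the sketch below. It uses only the calls and result shape documented in this diff (`create_batch`, `wait_for_batch` with `poll_interval`/`timeout`, and the `result["result"]["type"]` convention from the `get_batch_results` docstring); how the `provider` instance is constructed is outside this diff and left as an assumption.

```python
import asyncio

async def run_batch(provider, requests):
    # Submit, poll until the batch ends, then split results by outcome.
    batch_id = provider.create_batch(requests)
    results = await provider.wait_for_batch(batch_id, poll_interval=300, timeout=86_400)

    succeeded = [r for r in results if r["result"]["type"] == "succeeded"]
    errored = [r for r in results if r["result"]["type"] != "succeeded"]
    print(f"{batch_id}: {len(succeeded)} succeeded, {len(errored)} errored")
    return succeeded, errored

# asyncio.run(run_batch(provider, requests)) once a provider and request list exist
```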