tokenator 0.1.8-py3-none-any.whl → 0.1.10-py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry. Both hunks below are pure deletions (151 and 17 lines removed, nothing added): the 0.1.8 contents of these files do not appear in this form in 0.1.10.
tokenator/client_openai.py
@@ -1,151 +0,0 @@
-"""OpenAI client wrapper with token usage tracking."""
-
-from typing import Any, Dict, Optional, TypeVar, Union, overload, Iterator, AsyncIterator
-import logging
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
-
-from .models import Usage, TokenUsageStats
-from .base_wrapper import BaseWrapper, ResponseType
-
-logger = logging.getLogger(__name__)
-
-class BaseOpenAIWrapper(BaseWrapper):
-    provider = "openai"
-
-    def _process_response_usage(self, response: ResponseType) -> Optional[TokenUsageStats]:
-        """Process and log usage statistics from a response."""
-        try:
-            if isinstance(response, ChatCompletion):
-                if response.usage is None:
-                    return None
-                usage = Usage(
-                    prompt_tokens=response.usage.prompt_tokens,
-                    completion_tokens=response.usage.completion_tokens,
-                    total_tokens=response.usage.total_tokens,
-                )
-                return TokenUsageStats(model=response.model, usage=usage)
-
-            elif isinstance(response, dict):
-                usage_dict = response.get('usage')
-                if not usage_dict:
-                    return None
-                usage = Usage(
-                    prompt_tokens=usage_dict.get('prompt_tokens', 0),
-                    completion_tokens=usage_dict.get('completion_tokens', 0),
-                    total_tokens=usage_dict.get('total_tokens', 0)
-                )
-                return TokenUsageStats(
-                    model=response.get('model', 'unknown'),
-                    usage=usage
-                )
-        except Exception as e:
-            logger.warning("Failed to process usage stats: %s", str(e))
-            return None
-        return None
-
-    @property
-    def chat(self):
-        return self
-
-    @property
-    def completions(self):
-        return self
-
-class OpenAIWrapper(BaseOpenAIWrapper):
-    def create(self, *args: Any, execution_id: Optional[str] = None, **kwargs: Any) -> Union[ChatCompletion, Iterator[ChatCompletion]]:
-        """Create a chat completion and log token usage."""
-        logger.debug("Creating chat completion with args: %s, kwargs: %s", args, kwargs)
-
-        response = self.client.chat.completions.create(*args, **kwargs)
-
-        if not kwargs.get('stream', False):
-            usage_data = self._process_response_usage(response)
-            if usage_data:
-                self._log_usage(usage_data, execution_id=execution_id)
-            return response
-
-        return self._wrap_streaming_response(response, execution_id)
-
-    def _wrap_streaming_response(self, response_iter: Stream[ChatCompletionChunk], execution_id: Optional[str]) -> Iterator[ChatCompletionChunk]:
-        """Wrap streaming response to capture final usage stats"""
-        chunks_with_usage = []
-        for chunk in response_iter:
-            if isinstance(chunk, ChatCompletionChunk) and chunk.usage is not None:
-                chunks_with_usage.append(chunk)
-            yield chunk
-
-        if len(chunks_with_usage) > 0:
-            usage_data: TokenUsageStats = TokenUsageStats(model=chunks_with_usage[0].model, usage=Usage())
-            for chunk in chunks_with_usage:
-                usage_data.usage.prompt_tokens += chunk.usage.prompt_tokens
-                usage_data.usage.completion_tokens += chunk.usage.completion_tokens
-                usage_data.usage.total_tokens += chunk.usage.total_tokens
-
-            self._log_usage(usage_data, execution_id=execution_id)
-
-
-class AsyncOpenAIWrapper(BaseOpenAIWrapper):
-    async def create(self, *args: Any, execution_id: Optional[str] = None, **kwargs: Any) -> Union[ChatCompletion, AsyncIterator[ChatCompletion]]:
-        """Create a chat completion and log token usage."""
-        logger.debug("Creating chat completion with args: %s, kwargs: %s", args, kwargs)
-
-        if kwargs.get('stream', False):
-            response = await self.client.chat.completions.create(*args, **kwargs)
-            return self._wrap_streaming_response(response, execution_id)
-
-        response = await self.client.chat.completions.create(*args, **kwargs)
-        usage_data = self._process_response_usage(response)
-        if usage_data:
-            self._log_usage(usage_data, execution_id=execution_id)
-        return response
-
-    async def _wrap_streaming_response(self, response_iter: AsyncStream[ChatCompletionChunk], execution_id: Optional[str]) -> AsyncIterator[ChatCompletionChunk]:
-        """Wrap streaming response to capture final usage stats"""
-        chunks_with_usage = []
-        async for chunk in response_iter:
-            if isinstance(chunk, ChatCompletionChunk) and chunk.usage is not None:
-                chunks_with_usage.append(chunk)
-            yield chunk
-
-        if len(chunks_with_usage) > 0:
-            usage_data: TokenUsageStats = TokenUsageStats(model=chunks_with_usage[0].model, usage=Usage())
-            for chunk in chunks_with_usage:
-                usage_data.usage.prompt_tokens += chunk.usage.prompt_tokens
-                usage_data.usage.completion_tokens += chunk.usage.completion_tokens
-                usage_data.usage.total_tokens += chunk.usage.total_tokens
-
-            self._log_usage(usage_data, execution_id=execution_id)
-
-@overload
-def tokenator_openai(
-    client: OpenAI,
-    db_path: Optional[str] = None,
-) -> OpenAIWrapper: ...
-
-@overload
-def tokenator_openai(
-    client: AsyncOpenAI,
-    db_path: Optional[str] = None,
-) -> AsyncOpenAIWrapper: ...
-
-def tokenator_openai(
-    client: Union[OpenAI, AsyncOpenAI],
-    db_path: Optional[str] = None,
-) -> Union[OpenAIWrapper, AsyncOpenAIWrapper]:
-    """Create a token-tracking wrapper for an OpenAI client.
-
-    Args:
-        client: OpenAI or AsyncOpenAI client instance
-        db_path: Optional path to SQLite database for token tracking
-    """
-    if isinstance(client, OpenAI):
-        return OpenAIWrapper(client=client, db_path=db_path)
-
-    if isinstance(client, AsyncOpenAI):
-        return AsyncOpenAIWrapper(client=client, db_path=db_path)
-
-    raise ValueError("Client must be an instance of OpenAI or AsyncOpenAI")
-
-__all__ = ["tokenator_openai"]
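
The deleted module above is the entire public surface of the 0.1.8 OpenAI integration: tokenator_openai wraps a sync or async client, and because the chat and completions properties both return the wrapper itself, the familiar client.chat.completions.create(...) call path resolves to the wrapper's own create, which logs usage before handing the response back. A minimal usage sketch against the 0.1.8 API as shown (the model name, prompt, and tokens.db path are illustrative assumptions, not taken from the package):

from openai import OpenAI
from tokenator.client_openai import tokenator_openai

# Wrap the client; db_path optionally points at the SQLite tracking database.
client = tokenator_openai(OpenAI(), db_path="tokens.db")

# Non-streaming: usage is read from response.usage and logged after the call returns.
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
    execution_id="example-run",  # optional tag threaded through to _log_usage
)

# Streaming: the wrapper yields chunks unchanged and sums usage from any chunk
# that carries it, so ask OpenAI to attach usage stats to the stream.
for chunk in client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    stream_options={"include_usage": True},
):
    pass  # consume the stream; usage is logged once it is exhausted

Note that usage for a stream is only recorded after the generator is fully consumed, since the aggregation runs when the underlying iterator is exhausted.
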
tokenator-0.1.8.dist-info/RECORD
@@ -1,17 +0,0 @@
-tokenator/__init__.py,sha256=ZKe0zMGa_AqOeXUVgYqivUavht_byk03XNFEvAnxqsA,576
-tokenator/base_wrapper.py,sha256=vSu_pStKYulho7_5g0jMCNf84KRxC4kTKep0v8YE61M,2377
-tokenator/client_anthropic.py,sha256=1ejWIZBxtk-mWTVaKWeMUvS2hZ_Dn-vNKYa3yopdjAU,6714
-tokenator/client_openai.py,sha256=_4jvchKzpCFhpioMZTYIWV7_ephQp1abMCtswUDJv1M,6339
-tokenator/create_migrations.py,sha256=n1OVbWrdwvBdaN-Aqqt1gLCPQidfoQfeJtGsab_epGk,746
-tokenator/migrations/env.py,sha256=LR_hONDa8Saiq9CyNUpH8kZCi5PtXLaDlfABs_CePkk,1415
-tokenator/migrations/script.py.mako,sha256=nJL-tbLQE0Qy4P9S4r4ntNAcikPtoFUlvXe6xvm9ot8,635
-tokenator/migrations/versions/f6f1f2437513_initial_migration.py,sha256=DvHcjnREmUHZVX9q1e6PS4wNK_d4qGw-8pz0eS4_3mE,1860
-tokenator/migrations.py,sha256=BFgZRsdIx-Qs_WwDaH6cyi2124mLf5hA8VrIlW7f7Mg,1134
-tokenator/models.py,sha256=EprE_MMJxDS-YXlcIQLZzfekH7xTYbeOC3bx3B2osVw,1171
-tokenator/schemas.py,sha256=V7NYfY9eZvH3J6uOwXJz4dSAU6WYzINRnfFi1wWsTcc,2280
-tokenator/usage.py,sha256=aHjGwzDzaiVznahNk5HqVyk3IxDo5FtFVfOUCeE7DZ4,7833
-tokenator/utils.py,sha256=5mDiGHgt4koCY0onHwkRjwZIuAgP6QvrDZCwD20Sdk8,1969
-tokenator-0.1.8.dist-info/LICENSE,sha256=wdG-B6-ODk8RQ4jq5uXSn0w1UWTzCH_MMyvh7AwtGns,1074
-tokenator-0.1.8.dist-info/METADATA,sha256=1xgNdiPKTJlnBCPH6iMfi7-LoOl-t9soFzH_5V_eYIk,2444
-tokenator-0.1.8.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-tokenator-0.1.8.dist-info/RECORD,,
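
For reference, each RECORD row has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the "=" padding stripped (per the wheel RECORD convention). A small sketch for checking one entry against an unpacked wheel (the local path is an assumption):

import base64
import hashlib

def record_hash(path: str) -> str:
    """RECORD-style hash: urlsafe base64 of the SHA-256 digest, '=' padding stripped."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 0.1.8 wheel this should print
# _4jvchKzpCFhpioMZTYIWV7_ephQp1abMCtswUDJv1M (a 6339-byte file per the RECORD above).
print(record_hash("tokenator/client_openai.py"))
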