payi 0.1.0a35__py3-none-any.whl → 0.1.0a36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of payi might be problematic. Click here for more details.

payi/_models.py CHANGED
@@ -179,14 +179,14 @@ class BaseModel(pydantic.BaseModel):
179
179
  @classmethod
180
180
  @override
181
181
  def construct( # pyright: ignore[reportIncompatibleMethodOverride]
182
- cls: Type[ModelT],
182
+ __cls: Type[ModelT],
183
183
  _fields_set: set[str] | None = None,
184
184
  **values: object,
185
185
  ) -> ModelT:
186
- m = cls.__new__(cls)
186
+ m = __cls.__new__(__cls)
187
187
  fields_values: dict[str, object] = {}
188
188
 
189
- config = get_model_config(cls)
189
+ config = get_model_config(__cls)
190
190
  populate_by_name = (
191
191
  config.allow_population_by_field_name
192
192
  if isinstance(config, _ConfigProtocol)
@@ -196,7 +196,7 @@ class BaseModel(pydantic.BaseModel):
196
196
  if _fields_set is None:
197
197
  _fields_set = set()
198
198
 
199
- model_fields = get_model_fields(cls)
199
+ model_fields = get_model_fields(__cls)
200
200
  for name, field in model_fields.items():
201
201
  key = field.alias
202
202
  if key is None or (key not in values and populate_by_name):
payi/_response.py CHANGED
@@ -136,6 +136,8 @@ class BaseAPIResponse(Generic[R]):
136
136
  if cast_to and is_annotated_type(cast_to):
137
137
  cast_to = extract_type_arg(cast_to, 0)
138
138
 
139
+ origin = get_origin(cast_to) or cast_to
140
+
139
141
  if self._is_sse_stream:
140
142
  if to:
141
143
  if not is_stream_class_type(to):
@@ -195,8 +197,6 @@ class BaseAPIResponse(Generic[R]):
195
197
  if cast_to == bool:
196
198
  return cast(R, response.text.lower() == "true")
197
199
 
198
- origin = get_origin(cast_to) or cast_to
199
-
200
200
  if origin == APIResponse:
201
201
  raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
202
202
 
@@ -210,7 +210,13 @@ class BaseAPIResponse(Generic[R]):
210
210
  raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
211
211
  return cast(R, response)
212
212
 
213
- if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
213
+ if (
214
+ inspect.isclass(
215
+ origin # pyright: ignore[reportUnknownArgumentType]
216
+ )
217
+ and not issubclass(origin, BaseModel)
218
+ and issubclass(origin, pydantic.BaseModel)
219
+ ):
214
220
  raise TypeError("Pydantic models must subclass our base model type, e.g. `from payi import BaseModel`")
215
221
 
216
222
  if (
payi/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
2
 
3
3
  __title__ = "payi"
4
- __version__ = "0.1.0-alpha.35" # x-release-please-version
4
+ __version__ = "0.1.0-alpha.36" # x-release-please-version
@@ -0,0 +1,97 @@
1
+ import logging
2
+ from typing import Any
3
+
4
+ from wrapt import wrap_function_wrapper # type: ignore
5
+
6
+ from payi.types import IngestUnitsParams
7
+ from payi.types.ingest_units_params import Units
8
+
9
+ from .instrument import PayiInstrumentor
10
+
11
+
12
class AnthropicIntrumentor:  # NOTE(review): name is missing an "s" (Instrumentor); public, so left unchanged
    """Installs Pay-i usage tracking into the `anthropic` SDK via wrapt."""

    @staticmethod
    def instrument(instrumentor: PayiInstrumentor) -> None:
        """Patch `Messages.create` and `Messages.stream` to route through `chat_wrapper`.

        Best-effort: any failure (e.g. the anthropic package not being
        installed) is logged at debug level and swallowed.
        """
        try:
            import anthropic  # type: ignore # noqa: F401 I001

            # Same wrapper for both the synchronous and the streaming entry point.
            for method in ("Messages.create", "Messages.stream"):
                wrap_function_wrapper(
                    "anthropic.resources.messages",
                    method,
                    chat_wrapper(instrumentor),
                )

        except Exception as e:
            logging.debug(f"Error instrumenting anthropic: {e}")
            return
39
+
40
+
41
@PayiInstrumentor.payi_wrapper
def chat_wrapper(
    instrumentor: PayiInstrumentor,
    wrapped: Any,
    instance: Any,
    args: Any,
    kwargs: Any,
) -> Any:
    """Delegate an intercepted Anthropic chat call to the shared instrumentor."""
    # The chunk/response processors below understand Anthropic's usage payloads.
    handlers = (process_chunk, process_synchronous_response)
    return instrumentor.chat_wrapper(
        "system.anthropic",
        *handlers,
        wrapped,
        instance,
        args,
        kwargs,
    )
58
+
59
+
60
def process_chunk(chunk: Any, ingest: IngestUnitsParams) -> None:
    """Fold one Anthropic streaming event's token usage into ``ingest["units"]``.

    ``message_start`` carries the input-token counts (including prompt-cache
    stats); ``message_delta`` carries the running output-token count.

    NOTE(review): the ``message_delta`` branch assumes a ``message_start``
    event already populated ``units["text"]`` — true for Anthropic streams,
    but a KeyError otherwise; confirm against the stream contract.
    """
    if chunk.type == "message_start":
        usage = chunk.message.usage
        units = ingest["units"]

        units["text"] = Units(input=usage.input_tokens, output=0)

        # Cache counters are optional attributes on older SDK versions;
        # default to 0 when absent.
        cache_write = getattr(usage, "cache_creation_input_tokens", 0)
        if cache_write > 0:
            units["text_cache_write"] = Units(input=cache_write, output=0)

        cache_read = getattr(usage, "cache_read_input_tokens", 0)
        if cache_read > 0:
            units["text_cache_read"] = Units(input=cache_read, output=0)

    elif chunk.type == "message_delta":
        ingest["units"]["text"]["output"] = chunk.usage.output_tokens
78
+
79
+
80
def process_synchronous_response(response: Any, ingest: IngestUnitsParams, log_prompt_and_response: bool) -> None:
    """Record token usage for a non-streaming Anthropic call into ``ingest``.

    Args:
        response: Anthropic Message object; must expose ``.usage`` and ``.to_json()``.
        ingest: Pay-i ingest payload whose ``"units"`` dict is populated in place.
        log_prompt_and_response: when True, also store the serialized response.
    """
    usage = response.usage
    # Renamed from the original's `input`/`ouptut`: avoids shadowing the
    # builtin `input` and fixes the `ouptut` misspelling.
    input_tokens = usage.input_tokens
    output_tokens = usage.output_tokens
    units: dict[str, Units] = ingest["units"]

    # Cache counters are optional attributes on older SDK versions.
    cache_write = getattr(usage, "cache_creation_input_tokens", 0)
    if cache_write > 0:
        units["text_cache_write"] = Units(input=cache_write, output=0)

    cache_read = getattr(usage, "cache_read_input_tokens", 0)
    if cache_read > 0:
        units["text_cache_read"] = Units(input=cache_read, output=0)

    units["text"] = Units(input=input_tokens, output=output_tokens)

    if log_prompt_and_response:
        ingest["provider_response_json"] = response.to_json()
@@ -0,0 +1,7 @@
1
+ from enum import Enum
2
+
3
+
4
class Instruments(Enum):
    """Provider instrumentations selectable for Pay-i auto-tracking."""

    ALL = "all"
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
@@ -0,0 +1,89 @@
1
+ import json
2
+ import logging
3
+ from typing import Any
4
+ from importlib.metadata import version
5
+
6
+ from wrapt import wrap_function_wrapper # type: ignore
7
+
8
+ from payi.types import IngestUnitsParams
9
+ from payi.types.ingest_units_params import Units
10
+
11
+ from .instrument import PayiInstrumentor
12
+
13
+
14
class OpenAiInstrumentor:
    """Installs Pay-i usage tracking into the `openai` SDK via wrapt."""

    @staticmethod
    def instrument(instrumentor: PayiInstrumentor) -> None:
        """Patch `Completions.create` so chat calls route through `chat_wrapper`.

        Best-effort: any failure (e.g. the openai package not being
        installed) is logged at debug level and swallowed.
        """
        try:
            from openai import OpenAI  # type: ignore # noqa: F401 I001

            module, attr = "openai.resources.chat.completions", "Completions.create"
            wrap_function_wrapper(module, attr, chat_wrapper(instrumentor))
        except Exception as e:
            logging.debug(f"Error instrumenting openai: {e}")
            return
28
+
29
+
30
@PayiInstrumentor.payi_wrapper
def chat_wrapper(
    instrumentor: PayiInstrumentor,
    wrapped: Any,
    instance: Any,
    args: Any,
    kwargs: Any,
) -> Any:
    """Delegate an intercepted OpenAI chat call to the shared instrumentor."""
    # The chunk/response processors below understand OpenAI's usage payloads.
    handlers = (process_chat_chunk, process_chat_synchronous_response)
    return instrumentor.chat_wrapper(
        "system.openai",
        *handlers,
        wrapped,
        instance,
        args,
        kwargs,
    )
47
+
48
+
49
def process_chat_synchronous_response(response: Any, ingest: IngestUnitsParams, log_prompt_and_response: bool) -> None:
    """Record usage (and optionally the raw JSON) for a non-streaming chat completion.

    Fix: the original annotated ``response`` as ``str``, but it is a
    ChatCompletion-like model object handed straight to ``model_to_dict``.

    Args:
        response: OpenAI chat-completion response object.
        ingest: Pay-i ingest payload; its ``"units"`` dict is populated in place.
        log_prompt_and_response: when True, also store the serialized response.
    """
    response_dict = model_to_dict(response)

    add_usage_units(response_dict["usage"], ingest["units"])

    if log_prompt_and_response:
        ingest["provider_response_json"] = [json.dumps(response_dict)]
56
+
57
+
58
def process_chat_chunk(chunk: Any, ingest: IngestUnitsParams) -> None:
    """Add a streamed chunk's usage block (when present) to ``ingest["units"]``.

    Only the final chunk of an OpenAI stream typically carries usage;
    chunks without it are ignored.
    """
    usage = model_to_dict(chunk).get("usage")
    if usage:
        add_usage_units(usage, ingest["units"])
63
+
64
+
65
def model_to_dict(model: Any) -> Any:
    """Best-effort conversion of an SDK response object to a plain dict.

    Handles pydantic v1 (``.dict()``), pydantic v2 (``.model_dump()``),
    raw API response wrappers (``.parse()``), and falls through to the
    object itself when none of those apply.
    """
    # Bug fix: the original compared version("pydantic") < "2.0.0" as
    # strings, which misorders e.g. "10.0.0" < "2.0.0". Compare the major
    # component numerically, and don't crash when pydantic isn't installed
    # (assume the modern v2 API in that case).
    try:
        pydantic_major = int(version("pydantic").split(".")[0])
    except Exception:  # PackageNotFoundError, unparsable version string, ...
        pydantic_major = 2
    if pydantic_major < 2:
        return model.dict()
    if hasattr(model, "model_dump"):
        return model.model_dump()
    elif hasattr(model, "parse"):  # Raw API response wrapper
        return model_to_dict(model.parse())
    else:
        return model
74
+
75
+
76
def add_usage_units(usage: "dict[str, Any]", units: "dict[str, Units]") -> None:
    """Translate an OpenAI usage dict into Pay-i units, in place.

    Cached prompt tokens (``prompt_tokens_details.cached_tokens``) are
    reported separately under ``"text_cache_read"`` and subtracted from
    the billed input count.

    Args:
        usage: the ``"usage"`` mapping from an OpenAI response dict.
        units: the ingest ``"units"`` mapping to populate.
    """
    # Renamed from `input` to avoid shadowing the builtin; `x if k in d
    # else 0` replaced with the equivalent dict.get idiom.
    input_tokens = usage.get("prompt_tokens", 0)
    output_tokens = usage.get("completion_tokens", 0)
    cached_tokens = 0

    prompt_tokens_details = usage.get("prompt_tokens_details")
    if prompt_tokens_details:
        cached_tokens = prompt_tokens_details.get("cached_tokens", 0)
        if cached_tokens != 0:
            units["text_cache_read"] = Units(input=cached_tokens, output=0)

    # cached_tokens is 0 when no details were present, so this is a no-op then.
    input_tokens -= cached_tokens

    units["text"] = Units(input=input_tokens, output=output_tokens)
payi/lib/Stopwatch.py ADDED
@@ -0,0 +1,27 @@
1
+ import time
2
+ from typing import Optional
3
+
4
+
5
class Stopwatch:
    """Simple timer built on ``time.perf_counter``.

    ``stop()`` is optional: the ``elapsed_*`` accessors report the running
    time until ``stop()`` is called, and the frozen interval afterwards.
    """

    def __init__(self) -> None:
        # perf_counter timestamps; None until start()/stop() are called.
        self.start_time: Optional[float] = None
        self.end_time: Optional[float] = None

    def start(self) -> None:
        """Record the starting timestamp."""
        self.start_time = time.perf_counter()

    def stop(self) -> None:
        """Record the ending timestamp, freezing the elapsed interval."""
        self.end_time = time.perf_counter()

    def elapsed_s(self) -> float:
        """Return elapsed seconds.

        Raises:
            ValueError: if ``start()`` was never called.
        """
        if self.start_time is None:
            raise ValueError("Stopwatch has not been started")
        end = self.end_time if self.end_time is not None else time.perf_counter()
        return end - self.start_time

    def elapsed_ms(self) -> float:
        """Return elapsed milliseconds as a float."""
        return self.elapsed_s() * 1000

    def elapsed_ms_int(self) -> int:
        """Return elapsed milliseconds truncated to an int."""
        return int(self.elapsed_ms())
payi/lib/helpers.py CHANGED
@@ -3,28 +3,30 @@ from typing import Dict, List, Union
3
3
 
4
4
 
5
5
def create_limit_header_from_ids(limit_ids: List[str]) -> Dict[str, str]:
    """Build the ``xProxy-Limit-IDs`` header from a list of limit IDs.

    Non-string and blank entries are skipped; returns ``{}`` when nothing
    valid remains.

    Raises:
        TypeError: if ``limit_ids`` is not a list.
    """
    if not isinstance(limit_ids, list):  # type: ignore
        raise TypeError("limit_ids must be a list")

    # `limit_id` instead of the original `id`, which shadows the builtin.
    valid_ids = [limit_id.strip() for limit_id in limit_ids if isinstance(limit_id, str) and limit_id.strip()]  # type: ignore

    return {"xProxy-Limit-IDs": ",".join(valid_ids)} if valid_ids else {}
12
12
 
13
+
13
14
def create_request_header_from_tags(request_tags: List[str]) -> Dict[str, str]:
    """Build the ``xProxy-Request-Tags`` header from a list of tags.

    Blank and non-string entries are dropped; returns ``{}`` when nothing
    valid remains.

    Raises:
        TypeError: if ``request_tags`` is not a list.
    """
    if not isinstance(request_tags, list):  # type: ignore
        raise TypeError("request_tags must be a list")

    cleaned = [t.strip() for t in request_tags if isinstance(t, str) and t.strip()]  # type: ignore

    if not cleaned:
        return {}
    return {"xProxy-Request-Tags": ",".join(cleaned)}
20
21
 
22
+
21
23
  def create_headers(
22
- limit_ids: Union[List[str], None] = None,
24
+ limit_ids: Union[List[str], None] = None,
23
25
  request_tags: Union[List[str], None] = None,
24
26
  user_id: Union[str, None] = None,
25
- experience_id: Union[str, None] = None,
26
- experience_name: Union[str, None] = None,
27
- ) -> Dict[str, str]:
27
+ experience_id: Union[str, None] = None,
28
+ experience_name: Union[str, None] = None,
29
+ ) -> Dict[str, str]:
28
30
  headers: Dict[str, str] = {}
29
31
 
30
32
  if limit_ids:
@@ -38,4 +40,4 @@ def create_headers(
38
40
  if experience_name:
39
41
  headers.update({"xProxy-Experience-Name": experience_name})
40
42
 
41
- return headers
43
+ return headers