payi 0.1.0a80__py3-none-any.whl → 0.1.0a82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of payi might be problematic. Click here for more details.

payi/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
2
 
3
3
  __title__ = "payi"
4
- __version__ = "0.1.0-alpha.80" # x-release-please-version
4
+ __version__ = "0.1.0-alpha.82" # x-release-please-version
@@ -1,4 +1,3 @@
1
- import logging
2
1
  from typing import Any, Union, Optional, Sequence
3
2
  from typing_extensions import override
4
3
 
@@ -48,7 +47,7 @@ class AnthropicInstrumentor:
48
47
  )
49
48
 
50
49
  except Exception as e:
51
- logging.debug(f"Error instrumenting anthropic: {e}")
50
+ instrumentor._logger.debug(f"Error instrumenting anthropic: {e}")
52
51
  return
53
52
 
54
53
 
@@ -60,6 +59,7 @@ def messages_wrapper(
60
59
  *args: Any,
61
60
  **kwargs: Any,
62
61
  ) -> Any:
62
+ instrumentor._logger.debug("Anthropic messages wrapper")
63
63
  return instrumentor.invoke_wrapper(
64
64
  _AnthropicProviderRequest(instrumentor=instrumentor, streaming_type=_StreamingType.iterator, instance=instance),
65
65
  _IsStreaming.kwargs,
@@ -77,6 +77,7 @@ def stream_messages_wrapper(
77
77
  *args: Any,
78
78
  **kwargs: Any,
79
79
  ) -> Any:
80
+ instrumentor._logger.debug("Anthropic stream wrapper")
80
81
  return instrumentor.invoke_wrapper(
81
82
  _AnthropicProviderRequest(instrumentor=instrumentor, streaming_type=_StreamingType.stream_manager, instance=instance),
82
83
  _IsStreaming.true,
@@ -94,6 +95,7 @@ async def amessages_wrapper(
94
95
  *args: Any,
95
96
  **kwargs: Any,
96
97
  ) -> Any:
98
+ instrumentor._logger.debug("aync Anthropic messages wrapper")
97
99
  return await instrumentor.async_invoke_wrapper(
98
100
  _AnthropicProviderRequest(instrumentor=instrumentor, streaming_type=_StreamingType.iterator, instance=instance),
99
101
  _IsStreaming.kwargs,
@@ -111,6 +113,7 @@ async def astream_messages_wrapper(
111
113
  *args: Any,
112
114
  **kwargs: Any,
113
115
  ) -> Any:
116
+ instrumentor._logger.debug("aync Anthropic stream wrapper")
114
117
  return await instrumentor.async_invoke_wrapper(
115
118
  _AnthropicProviderRequest(instrumentor=instrumentor, streaming_type=_StreamingType.stream_manager, instance=instance),
116
119
  _IsStreaming.true,
@@ -202,7 +205,7 @@ class _AnthropicProviderRequest(_ProviderRequest):
202
205
  self._estimated_prompt_tokens = estimated_token_count
203
206
 
204
207
  except Exception:
205
- logging.warning("Error getting encoding for cl100k_base")
208
+ self._instrumentor._logger.warning("Error getting encoding for cl100k_base")
206
209
 
207
210
  return True
208
211
 
@@ -233,7 +236,7 @@ class _AnthropicProviderRequest(_ProviderRequest):
233
236
  self._ingest["provider_response_json"] = text
234
237
 
235
238
  except Exception as e:
236
- logging.debug(f"Error processing exception: {e}")
239
+ self._instrumentor._logger.debug(f"Error processing exception: {e}")
237
240
  return False
238
241
 
239
242
  return True
@@ -1,6 +1,5 @@
1
1
  import os
2
2
  import json
3
- import logging
4
3
  from typing import Any, Sequence
5
4
  from functools import wraps
6
5
  from typing_extensions import override
@@ -38,12 +37,13 @@ class BedrockInstrumentor:
38
37
  )
39
38
 
40
39
  except Exception as e:
41
- logging.debug(f"Error instrumenting bedrock: {e}")
40
+ instrumentor._logger.debug(f"Error instrumenting bedrock: {e}")
42
41
  return
43
42
 
44
43
  @_PayiInstrumentor.payi_wrapper
45
44
  def create_client_wrapper(instrumentor: _PayiInstrumentor, wrapped: Any, instance: Any, *args: Any, **kwargs: Any) -> Any: # noqa: ARG001
46
45
  if kwargs.get("service_name") != "bedrock-runtime":
46
+ instrumentor._logger.debug(f"skipping client wrapper creation for {kwargs.get('service_name', '')} service")
47
47
  return wrapped(*args, **kwargs)
48
48
 
49
49
  try:
@@ -53,13 +53,16 @@ def create_client_wrapper(instrumentor: _PayiInstrumentor, wrapped: Any, instanc
53
53
  client.converse = wrap_converse(instrumentor, client.converse)
54
54
  client.converse_stream = wrap_converse_stream(instrumentor, client.converse_stream)
55
55
 
56
+ instrumentor._logger.debug(f"Instrumented bedrock client")
57
+
56
58
  if BedrockInstrumentor._instrumentor._proxy_default:
57
59
  # Register client callbacks to handle the Pay-i extra_headers parameter in the inference calls and redirect the request to the Pay-i endpoint
58
60
  _register_bedrock_client_callbacks(client)
61
+ instrumentor._logger.debug(f"Registered bedrock client callbaks for proxy")
59
62
 
60
63
  return client
61
64
  except Exception as e:
62
- logging.debug(f"Error instrumenting bedrock client: {e}")
65
+ instrumentor._logger.debug(f"Error instrumenting bedrock client: {e}")
63
66
 
64
67
  return wrapped(*args, **kwargs)
65
68
 
@@ -148,6 +151,7 @@ def wrap_invoke(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
148
151
  modelId:str = kwargs.get("modelId", "") # type: ignore
149
152
 
150
153
  if _is_supported_model(modelId):
154
+ instrumentor._logger.debug(f"bedrock invoke wrapper, modelId: {modelId}")
151
155
  return instrumentor.invoke_wrapper(
152
156
  _BedrockInvokeSynchronousProviderRequest(instrumentor=instrumentor),
153
157
  _IsStreaming.false,
@@ -156,6 +160,8 @@ def wrap_invoke(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
156
160
  args,
157
161
  kwargs,
158
162
  )
163
+
164
+ instrumentor._logger.debug(f"bedrock invoke wrapper, unsupported modelId: {modelId}")
159
165
  return wrapped(*args, **kwargs)
160
166
 
161
167
  return invoke_wrapper
@@ -166,6 +172,7 @@ def wrap_invoke_stream(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
166
172
  modelId: str = kwargs.get("modelId", "") # type: ignore
167
173
 
168
174
  if _is_supported_model(modelId):
175
+ instrumentor._logger.debug(f"bedrock invoke stream wrapper, modelId: {modelId}")
169
176
  return instrumentor.invoke_wrapper(
170
177
  _BedrockInvokeStreamingProviderRequest(instrumentor=instrumentor, model_id=modelId),
171
178
  _IsStreaming.true,
@@ -174,6 +181,7 @@ def wrap_invoke_stream(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
174
181
  args,
175
182
  kwargs,
176
183
  )
184
+ instrumentor._logger.debug(f"bedrock invoke stream wrapper, unsupported modelId: {modelId}")
177
185
  return wrapped(*args, **kwargs)
178
186
 
179
187
  return invoke_wrapper
@@ -184,6 +192,7 @@ def wrap_converse(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
184
192
  modelId:str = kwargs.get("modelId", "") # type: ignore
185
193
 
186
194
  if _is_supported_model(modelId):
195
+ instrumentor._logger.debug(f"bedrock converse wrapper, modelId: {modelId}")
187
196
  return instrumentor.invoke_wrapper(
188
197
  _BedrockConverseSynchronousProviderRequest(instrumentor=instrumentor),
189
198
  _IsStreaming.false,
@@ -192,6 +201,7 @@ def wrap_converse(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
192
201
  args,
193
202
  kwargs,
194
203
  )
204
+ instrumentor._logger.debug(f"bedrock converse wrapper, unsupported modelId: {modelId}")
195
205
  return wrapped(*args, **kwargs)
196
206
 
197
207
  return invoke_wrapper
@@ -202,6 +212,7 @@ def wrap_converse_stream(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
202
212
  modelId: str = kwargs.get("modelId", "") # type: ignore
203
213
 
204
214
  if _is_supported_model(modelId):
215
+ instrumentor._logger.debug(f"bedrock converse stream wrapper, modelId: {modelId}")
205
216
  return instrumentor.invoke_wrapper(
206
217
  _BedrockConverseStreamingProviderRequest(instrumentor=instrumentor),
207
218
  _IsStreaming.true,
@@ -210,6 +221,7 @@ def wrap_converse_stream(instrumentor: _PayiInstrumentor, wrapped: Any) -> Any:
210
221
  args,
211
222
  kwargs,
212
223
  )
224
+ instrumentor._logger.debug(f"bedrock converse stream wrapper, unsupported modelId: {modelId}")
213
225
  return wrapped(*args, **kwargs)
214
226
 
215
227
  return invoke_wrapper
@@ -251,7 +263,7 @@ class _BedrockProviderRequest(_ProviderRequest):
251
263
  return True
252
264
 
253
265
  except Exception as e:
254
- logging.debug(f"Error processing exception: {e}")
266
+ self._instrumentor._logger.debug(f"Error processing exception: {e}")
255
267
  return False
256
268
 
257
269
  class _BedrockInvokeStreamingProviderRequest(_BedrockProviderRequest):
@@ -1,6 +1,5 @@
1
1
  import json
2
2
  import math
3
- import logging
4
3
  from typing import Any, List, Union, Optional, Sequence
5
4
  from typing_extensions import override
6
5
 
@@ -41,7 +40,7 @@ class GoogleGenAiInstrumentor:
41
40
  )
42
41
 
43
42
  except Exception as e:
44
- logging.debug(f"Error instrumenting vertex: {e}")
43
+ instrumentor._logger.debug(f"Error instrumenting vertex: {e}")
45
44
  return
46
45
 
47
46
  @_PayiInstrumentor.payi_wrapper
@@ -52,6 +51,7 @@ def generate_wrapper(
52
51
  *args: Any,
53
52
  **kwargs: Any,
54
53
  ) -> Any:
54
+ instrumentor._logger.debug("genai generate_content wrapper")
55
55
  return instrumentor.invoke_wrapper(
56
56
  _GoogleGenAiRequest(instrumentor),
57
57
  _IsStreaming.false,
@@ -69,6 +69,7 @@ def generate_stream_wrapper(
69
69
  *args: Any,
70
70
  **kwargs: Any,
71
71
  ) -> Any:
72
+ instrumentor._logger.debug("genai generate_content_stream wrapper")
72
73
  return instrumentor.invoke_wrapper(
73
74
  _GoogleGenAiRequest(instrumentor),
74
75
  _IsStreaming.true,
@@ -86,6 +87,7 @@ async def agenerate_wrapper(
86
87
  *args: Any,
87
88
  **kwargs: Any,
88
89
  ) -> Any:
90
+ instrumentor._logger.debug("async genai generate_content wrapper")
89
91
  return await instrumentor.async_invoke_wrapper(
90
92
  _GoogleGenAiRequest(instrumentor),
91
93
  _IsStreaming.false,
@@ -103,6 +105,7 @@ async def agenerate_stream_wrapper(
103
105
  *args: Any,
104
106
  **kwargs: Any,
105
107
  ) -> Any:
108
+ instrumentor._logger.debug("async genai generate_content_stream wrapper")
106
109
  return await instrumentor.async_invoke_wrapper(
107
110
  _GoogleGenAiRequest(instrumentor),
108
111
  _IsStreaming.true,
@@ -306,15 +309,15 @@ class _GoogleGenAiRequest(_ProviderRequest):
306
309
  usage = response_dict.get("usage_metadata", {})
307
310
  input = usage.get("prompt_token_count", 0)
308
311
 
309
- prompt_tokens_details: list[dict[str, Any]] = usage.get("prompt_tokens_details")
310
- candidates_tokens_details: list[dict[str, Any]] = usage.get("candidates_tokens_details")
312
+ prompt_tokens_details: list[dict[str, Any]] = usage.get("prompt_tokens_details", [])
313
+ candidates_tokens_details: list[dict[str, Any]] = usage.get("candidates_tokens_details", [])
311
314
 
312
315
  model: str = response_dict.get("model_version", "")
316
+
317
+ # for character billing only
318
+ large_context = "" if input < 128000 else "_large_context"
313
319
 
314
320
  if self._is_character_billing_model(model):
315
- # gemini 1.0 and 1.5 units are reported in characters, per second, per image, etc...
316
- large_context = "" if input < 128000 else "_large_context"
317
-
318
321
  for details in prompt_tokens_details:
319
322
  modality = details.get("modality", "")
320
323
  if not modality:
@@ -354,7 +357,7 @@ class _GoogleGenAiRequest(_ProviderRequest):
354
357
  audio_seconds = math.ceil(modality_token_count / 25)
355
358
  self.add_units("audio"+large_context, input=audio_seconds)
356
359
 
357
- elif model.startswith("gemini-2.0"):
360
+ else:
358
361
  for details in prompt_tokens_details:
359
362
  modality = details.get("modality", "")
360
363
  if not modality:
@@ -373,3 +376,19 @@ class _GoogleGenAiRequest(_ProviderRequest):
373
376
  modality_token_count = details.get("token_count", 0)
374
377
  if modality in ("VIDEO", "AUDIO", "TEXT", "IMAGE"):
375
378
  self.add_units(modality.lower(), output=modality_token_count)
379
+
380
+ if not self._ingest["units"]:
381
+ input = usage.get("prompt_token_count", 0)
382
+ output = usage.get("candidates_token_count", 0) * 4
383
+
384
+ if self._is_character_billing_model(model):
385
+ if self._prompt_character_count > 0:
386
+ input = self._prompt_character_count
387
+ else:
388
+ input *= 4
389
+
390
+ # if no units were added, add a default unit and assume 4 characters per token
391
+ self._ingest["units"]["text"+large_context] = Units(input=input, output=output)
392
+ else:
393
+ # if no units were added, add a default unit
394
+ self._ingest["units"]["text"] = Units(input=input, output=output)
@@ -1,5 +1,4 @@
1
1
  import json
2
- import logging
3
2
  from typing import Any, Union, Optional, Sequence
4
3
  from typing_extensions import override
5
4
  from importlib.metadata import version
@@ -62,7 +61,7 @@ class OpenAiInstrumentor:
62
61
  )
63
62
 
64
63
  except Exception as e:
65
- logging.debug(f"Error instrumenting openai: {e}")
64
+ instrumentor._logger.debug(f"Error instrumenting openai: {e}")
66
65
  return
67
66
 
68
67
 
@@ -74,6 +73,7 @@ def embeddings_wrapper(
74
73
  *args: Any,
75
74
  **kwargs: Any,
76
75
  ) -> Any:
76
+ instrumentor._logger.debug("OpenAI Embeddings wrapper")
77
77
  return instrumentor.invoke_wrapper(
78
78
  _OpenAiEmbeddingsProviderRequest(instrumentor),
79
79
  _IsStreaming.false,
@@ -91,6 +91,7 @@ async def aembeddings_wrapper(
91
91
  *args: Any,
92
92
  **kwargs: Any,
93
93
  ) -> Any:
94
+ instrumentor._logger.debug("async OpenAI Embeddings wrapper")
94
95
  return await instrumentor.async_invoke_wrapper(
95
96
  _OpenAiEmbeddingsProviderRequest(instrumentor),
96
97
  _IsStreaming.false,
@@ -108,6 +109,7 @@ def chat_wrapper(
108
109
  *args: Any,
109
110
  **kwargs: Any,
110
111
  ) -> Any:
112
+ instrumentor._logger.debug("OpenAI completions wrapper")
111
113
  return instrumentor.invoke_wrapper(
112
114
  _OpenAiChatProviderRequest(instrumentor),
113
115
  _IsStreaming.kwargs,
@@ -125,6 +127,7 @@ async def achat_wrapper(
125
127
  *args: Any,
126
128
  **kwargs: Any,
127
129
  ) -> Any:
130
+ instrumentor._logger.debug("async OpenAI completions wrapper")
128
131
  return await instrumentor.async_invoke_wrapper(
129
132
  _OpenAiChatProviderRequest(instrumentor),
130
133
  _IsStreaming.kwargs,
@@ -142,6 +145,7 @@ def responses_wrapper(
142
145
  *args: Any,
143
146
  **kwargs: Any,
144
147
  ) -> Any:
148
+ instrumentor._logger.debug("OpenAI responses wrapper")
145
149
  return instrumentor.invoke_wrapper(
146
150
  _OpenAiResponsesProviderRequest(instrumentor),
147
151
  _IsStreaming.kwargs,
@@ -159,6 +163,7 @@ async def aresponses_wrapper(
159
163
  *args: Any,
160
164
  **kwargs: Any,
161
165
  ) -> Any:
166
+ instrumentor._logger.debug("async OpenAI responses wrapper")
162
167
  return await instrumentor.async_invoke_wrapper(
163
168
  _OpenAiResponsesProviderRequest(instrumentor),
164
169
  _IsStreaming.kwargs,
@@ -207,12 +212,12 @@ class _OpenAiProviderRequest(_ProviderRequest):
207
212
  del extra_headers[PayiHeaderNames.resource_scope]
208
213
 
209
214
  if not price_as_resource and not price_as_category:
210
- logging.error("Azure OpenAI requires price as resource and/or category to be specified, not ingesting")
215
+ self._instrumentor._logger.error("Azure OpenAI requires price as resource and/or category to be specified, not ingesting")
211
216
  return False
212
217
 
213
218
  if resource_scope:
214
219
  if not(resource_scope in ["global", "datazone"] or resource_scope.startswith("region")):
215
- logging.error("Azure OpenAI invalid resource scope, not ingesting")
220
+ self._instrumentor._logger.error("Azure OpenAI invalid resource scope, not ingesting")
216
221
  return False
217
222
 
218
223
  self._ingest["resource_scope"] = resource_scope
@@ -256,7 +261,7 @@ class _OpenAiProviderRequest(_ProviderRequest):
256
261
  self._ingest["provider_response_json"] = text
257
262
 
258
263
  except Exception as e:
259
- logging.debug(f"Error processing exception: {e}")
264
+ self._instrumentor._logger.debug(f"Error processing exception: {e}")
260
265
  return False
261
266
 
262
267
  return True
@@ -372,7 +377,7 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
372
377
  try:
373
378
  enc = tiktoken.get_encoding("o200k_base") # type: ignore
374
379
  except Exception:
375
- logging.warning("Error getting encoding for fallback o200k_base")
380
+ self._instrumentor._logger.warning("Error getting encoding for fallback o200k_base")
376
381
  enc = None
377
382
 
378
383
  if enc:
@@ -450,7 +455,7 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
450
455
  try:
451
456
  enc = tiktoken.get_encoding("o200k_base") # type: ignore
452
457
  except Exception:
453
- logging.warning("Error getting encoding for fallback o200k_base")
458
+ self._instrumentor._logger.warning("Error getting encoding for fallback o200k_base")
454
459
  enc = None
455
460
 
456
461
  # find each content..type="input_text" and count tokens
@@ -1,6 +1,5 @@
1
1
  import json
2
2
  import math
3
- import logging
4
3
  from typing import Any, List, Union, Optional, Sequence
5
4
  from typing_extensions import override
6
5
 
@@ -43,7 +42,7 @@ class VertexInstrumentor:
43
42
  )
44
43
 
45
44
  except Exception as e:
46
- logging.debug(f"Error instrumenting vertex: {e}")
45
+ instrumentor._logger.debug(f"Error instrumenting vertex: {e}")
47
46
  return
48
47
 
49
48
  @_PayiInstrumentor.payi_wrapper
@@ -54,6 +53,7 @@ def generate_wrapper(
54
53
  *args: Any,
55
54
  **kwargs: Any,
56
55
  ) -> Any:
56
+ instrumentor._logger.debug("vertexai generate_content wrapper")
57
57
  return instrumentor.invoke_wrapper(
58
58
  _GoogleVertexRequest(instrumentor),
59
59
  _IsStreaming.kwargs,
@@ -71,6 +71,7 @@ async def agenerate_wrapper(
71
71
  *args: Any,
72
72
  **kwargs: Any,
73
73
  ) -> Any:
74
+ instrumentor._logger.debug("async vertexai generate_content wrapper")
74
75
  return await instrumentor.async_invoke_wrapper(
75
76
  _GoogleVertexRequest(instrumentor),
76
77
  _IsStreaming.kwargs,
@@ -252,15 +253,16 @@ class _GoogleVertexRequest(_ProviderRequest):
252
253
  usage = response_dict.get("usage_metadata", {})
253
254
  input = usage.get("prompt_token_count", 0)
254
255
 
255
- prompt_tokens_details: list[dict[str, Any]] = usage.get("prompt_tokens_details")
256
- candidates_tokens_details: list[dict[str, Any]] = usage.get("candidates_tokens_details")
256
+ prompt_tokens_details: list[dict[str, Any]] = usage.get("prompt_tokens_details", [])
257
+ candidates_tokens_details: list[dict[str, Any]] = usage.get("candidates_tokens_details", [])
257
258
 
258
259
  model: str = response_dict.get("model_version", "")
260
+
261
+ # for character billing only
262
+ large_context = "" if input < 128000 else "_large_context"
259
263
 
260
264
  if self._is_character_billing_model(model):
261
265
  # gemini 1.0 and 1.5 units are reported in characters, per second, per image, etc...
262
- large_context = "" if input < 128000 else "_large_context"
263
-
264
266
  for details in prompt_tokens_details:
265
267
  modality = details.get("modality", "")
266
268
  if not modality:
@@ -300,7 +302,7 @@ class _GoogleVertexRequest(_ProviderRequest):
300
302
  audio_seconds = math.ceil(modality_token_count / 25)
301
303
  self.add_units("audio"+large_context, input=audio_seconds)
302
304
 
303
- elif model.startswith("gemini-2.0"):
305
+ else:
304
306
  for details in prompt_tokens_details:
305
307
  modality = details.get("modality", "")
306
308
  if not modality:
@@ -319,3 +321,19 @@ class _GoogleVertexRequest(_ProviderRequest):
319
321
  modality_token_count = details.get("token_count", 0)
320
322
  if modality in ("VIDEO", "AUDIO", "TEXT", "IMAGE"):
321
323
  self.add_units(modality.lower(), output=modality_token_count)
324
+
325
+ if not self._ingest["units"]:
326
+ input = usage.get("prompt_token_count", 0)
327
+ output = usage.get("candidates_token_count", 0) * 4
328
+
329
+ if self._is_character_billing_model(model):
330
+ if self._prompt_character_count > 0:
331
+ input = self._prompt_character_count
332
+ else:
333
+ input *= 4
334
+
335
+ # if no units were added, add a default unit and assume 4 characters per token
336
+ self._ingest["units"]["text"+large_context] = Units(input=input, output=output)
337
+ else:
338
+ # if no units were added, add a default unit
339
+ self._ingest["units"]["text"] = Units(input=input, output=output)
payi/lib/instrument.py CHANGED
@@ -25,6 +25,8 @@ from payi.types.pay_i_common_models_api_router_header_info_param import PayIComm
25
25
  from .helpers import PayiCategories
26
26
  from .Stopwatch import Stopwatch
27
27
 
28
+ global _g_logger
29
+ _g_logger: logging.Logger = logging.getLogger("payi.instrument")
28
30
 
29
31
  class _ProviderRequest:
30
32
  def __init__(self, instrumentor: '_PayiInstrumentor', category: str, streaming_type: '_StreamingType'):
@@ -147,15 +149,24 @@ class _PayiInstrumentor:
147
149
  apayi: Optional[AsyncPayi],
148
150
  instruments: Union[Set[str], None] = None,
149
151
  log_prompt_and_response: bool = True,
152
+ logger: Optional[logging.Logger] = None,
150
153
  prompt_and_response_logger: Optional[
151
154
  Callable[[str, "dict[str, str]"], None]
152
155
  ] = None, # (request id, dict of data to store) -> None
153
156
  global_config: PayiInstrumentConfig = {},
154
157
  caller_filename: str = ""
155
158
  ):
159
+ global _g_logger
160
+ self._logger: logging.Logger = logger if logger else _g_logger
161
+
156
162
  self._payi: Optional[Payi] = payi
157
163
  self._apayi: Optional[AsyncPayi] = apayi
158
164
 
165
+ if self._payi:
166
+ _g_logger.debug(f"Pay-i instrumentor initialized with Payi instance: {self._payi}")
167
+ if self._apayi:
168
+ _g_logger.debug(f"Pay-i instrumentor initialized with AsyncPayi instance: {self._apayi}")
169
+
159
170
  self._context_stack: list[_Context] = [] # Stack of context dictionaries
160
171
  self._log_prompt_and_response: bool = log_prompt_and_response
161
172
  self._prompt_and_response_logger: Optional[Callable[[str, dict[str, str]], None]] = prompt_and_response_logger
@@ -191,7 +202,7 @@ class _PayiInstrumentor:
191
202
  self._call_async_use_case_definition_create(use_case_name=caller_filename, use_case_description=description)
192
203
  global_config["use_case_name"] = caller_filename
193
204
  except Exception as e:
194
- logging.error(f"Error creating default use case definition based on file name {caller_filename}: {e}")
205
+ self._logger.error(f"Error creating default use case definition based on file name {caller_filename}: {e}")
195
206
 
196
207
  self.__enter__()
197
208
  # _init_current_context will update the current context stack location
@@ -222,7 +233,7 @@ class _PayiInstrumentor:
222
233
  OpenAiInstrumentor.instrument(self)
223
234
 
224
235
  except Exception as e:
225
- logging.error(f"Error instrumenting OpenAI: {e}")
236
+ self._logger.error(f"Error instrumenting OpenAI: {e}")
226
237
 
227
238
  def _instrument_anthropic(self) -> None:
228
239
  from .AnthropicInstrumentor import AnthropicInstrumentor
@@ -231,7 +242,7 @@ class _PayiInstrumentor:
231
242
  AnthropicInstrumentor.instrument(self)
232
243
 
233
244
  except Exception as e:
234
- logging.error(f"Error instrumenting Anthropic: {e}")
245
+ self._logger.error(f"Error instrumenting Anthropic: {e}")
235
246
 
236
247
  def _instrument_aws_bedrock(self) -> None:
237
248
  from .BedrockInstrumentor import BedrockInstrumentor
@@ -240,7 +251,7 @@ class _PayiInstrumentor:
240
251
  BedrockInstrumentor.instrument(self)
241
252
 
242
253
  except Exception as e:
243
- logging.error(f"Error instrumenting AWS bedrock: {e}")
254
+ self._logger.error(f"Error instrumenting AWS bedrock: {e}")
244
255
 
245
256
  def _instrument_google_vertex(self) -> None:
246
257
  from .VertexInstrumentor import VertexInstrumentor
@@ -249,7 +260,7 @@ class _PayiInstrumentor:
249
260
  VertexInstrumentor.instrument(self)
250
261
 
251
262
  except Exception as e:
252
- logging.error(f"Error instrumenting Google Vertex: {e}")
263
+ self._logger.error(f"Error instrumenting Google Vertex: {e}")
253
264
 
254
265
  def _instrument_google_genai(self) -> None:
255
266
  from .GoogleGenAiInstrumentor import GoogleGenAiInstrumentor
@@ -258,13 +269,13 @@ class _PayiInstrumentor:
258
269
  GoogleGenAiInstrumentor.instrument(self)
259
270
 
260
271
  except Exception as e:
261
- logging.error(f"Error instrumenting Google GenAi: {e}")
272
+ self._logger.error(f"Error instrumenting Google GenAi: {e}")
262
273
 
263
274
  def _process_ingest_units(self, ingest_units: IngestUnitsParams, log_data: 'dict[str, str]') -> bool:
264
275
  if int(ingest_units.get("http_status_code") or 0) < 400:
265
276
  units = ingest_units.get("units", {})
266
277
  if not units or all(unit.get("input", 0) == 0 and unit.get("output", 0) == 0 for unit in units.values()):
267
- logging.error('No units to ingest')
278
+ self._logger.error('No units to ingest!')
268
279
  return False
269
280
 
270
281
  if self._log_prompt_and_response and self._prompt_and_response_logger:
@@ -302,20 +313,25 @@ class _PayiInstrumentor:
302
313
  async def _aingest_units(self, ingest_units: IngestUnitsParams) -> Optional[IngestResponse]:
303
314
  ingest_response: Optional[IngestResponse] = None
304
315
 
316
+ self._logger.debug(f"_aingest_units")
317
+
305
318
  # return early if there are no units to ingest and on a successul ingest request
306
319
  log_data: 'dict[str,str]' = {}
307
320
  if not self._process_ingest_units(ingest_units, log_data):
321
+ self._logger.debug(f"_aingest_units: exit early")
308
322
  return None
309
323
 
310
324
  try:
311
325
  if self._apayi:
312
- ingest_response= await self._apayi.ingest.units(**ingest_units)
326
+ ingest_response = await self._apayi.ingest.units(**ingest_units)
313
327
  elif self._payi:
314
328
  ingest_response = self._payi.ingest.units(**ingest_units)
315
329
  else:
316
- logging.error("No payi instance to ingest units")
330
+ self._logger.error("No payi instance to ingest units")
317
331
  return None
318
332
 
333
+ self._logger.debug(f"_aingest_units: success ({ingest_response})")
334
+
319
335
  if ingest_response:
320
336
  self._process_ingest_units_response(ingest_response)
321
337
 
@@ -325,7 +341,7 @@ class _PayiInstrumentor:
325
341
 
326
342
  return ingest_response
327
343
  except Exception as e:
328
- logging.error(f"Error Pay-i ingesting result: {e}")
344
+ self._logger.error(f"Error Pay-i ingesting request: {e}")
329
345
 
330
346
  return None
331
347
 
@@ -346,7 +362,7 @@ class _PayiInstrumentor:
346
362
  # When there's no running loop, create a new one
347
363
  asyncio.run(self._apayi.use_cases.definitions.create(name=use_case_name, description=use_case_description))
348
364
  except Exception as e:
349
- logging.error(f"Error calling async use_cases.definitions.create synchronously: {e}")
365
+ self._logger.error(f"Error calling async use_cases.definitions.create synchronously: {e}")
350
366
 
351
367
  def _call_aingest_sync(self, ingest_units: IngestUnitsParams) -> Optional[IngestResponse]:
352
368
  try:
@@ -362,20 +378,25 @@ class _PayiInstrumentor:
362
378
  # When there's no running loop, create a new one
363
379
  return asyncio.run(self._aingest_units(ingest_units))
364
380
  except Exception as e:
365
- logging.error(f"Error calling aingest_units synchronously: {e}")
381
+ self._logger.error(f"Error calling aingest_units synchronously: {e}")
366
382
  return None
367
383
 
368
384
  def _ingest_units(self, ingest_units: IngestUnitsParams) -> Optional[IngestResponse]:
369
385
  ingest_response: Optional[IngestResponse] = None
370
386
 
387
+ self._logger.debug(f"_ingest_units")
388
+
371
389
  # return early if there are no units to ingest and on a successul ingest request
372
390
  log_data: 'dict[str,str]' = {}
373
391
  if not self._process_ingest_units(ingest_units, log_data):
392
+ self._logger.debug(f"_ingest_units: exit early")
374
393
  return None
375
394
 
376
395
  try:
377
396
  if self._payi:
378
397
  ingest_response = self._payi.ingest.units(**ingest_units)
398
+ self._logger.debug(f"_ingest_units: success ({ingest_response})")
399
+
379
400
  self._process_ingest_units_response(ingest_response)
380
401
 
381
402
  if self._log_prompt_and_response and self._prompt_and_response_logger:
@@ -386,12 +407,13 @@ class _PayiInstrumentor:
386
407
  elif self._apayi:
387
408
  # task runs async. aingest_units will invoke the callback and post process
388
409
  ingest_response = self._call_aingest_sync(ingest_units)
410
+ self._logger.debug(f"_ingest_units: apayi success ({ingest_response})")
389
411
  return ingest_response
390
412
  else:
391
- logging.error("No payi instance to ingest units")
413
+ self._logger.error("No payi instance to ingest units")
392
414
 
393
415
  except Exception as e:
394
- logging.error(f"Error Pay-i ingesting result: {e}")
416
+ self._logger.error(f"Error Pay-i ingesting request: {e}")
395
417
 
396
418
  return None
397
419
 
@@ -655,11 +677,15 @@ class _PayiInstrumentor:
655
677
  args: Sequence[Any],
656
678
  kwargs: 'dict[str, Any]',
657
679
  ) -> Any:
680
+ self._logger.debug(f"async_invoke_wrapper: instance {instance}, category {request._category}")
681
+
658
682
  context = self.get_context()
659
683
 
660
684
  # Bedrock client does not have an async method
661
685
 
662
686
  if not context:
687
+ self._logger.debug(f"async_invoke_wrapper: no instrumentation context, exit early")
688
+
663
689
  # wrapped function invoked outside of decorator scope
664
690
  return await wrapped(*args, **kwargs)
665
691
 
@@ -673,6 +699,8 @@ class _PayiInstrumentor:
673
699
  if "extra_headers" not in kwargs and extra_headers:
674
700
  kwargs["extra_headers"] = extra_headers
675
701
 
702
+ self._logger.debug(f"async_invoke_wrapper: sending proxy request")
703
+
676
704
  return await wrapped(*args, **kwargs)
677
705
 
678
706
  current_frame = inspect.currentframe()
@@ -682,6 +710,7 @@ class _PayiInstrumentor:
682
710
  request._ingest['properties'] = { 'system.stack_trace': json.dumps(stack) }
683
711
 
684
712
  if request.process_request(instance, extra_headers, args, kwargs) is False:
713
+ self._logger.debug(f"async_invoke_wrapper: calling wrapped instance")
685
714
  return await wrapped(*args, **kwargs)
686
715
 
687
716
  sw = Stopwatch()
@@ -696,6 +725,8 @@ class _PayiInstrumentor:
696
725
 
697
726
  try:
698
727
  self._prepare_ingest(request, extra_headers, args, kwargs)
728
+ self._logger.debug(f"async_invoke_wrapper: calling wrapped instance (stream={stream})")
729
+
699
730
  sw.start()
700
731
  response = await wrapped(*args, **kwargs)
701
732
 
@@ -703,6 +734,8 @@ class _PayiInstrumentor:
703
734
  sw.stop()
704
735
  duration = sw.elapsed_ms_int()
705
736
 
737
+ self._logger.debug(f"invoke_wrapper: calling wrapped instance exception {e}")
738
+
706
739
  if request.process_exception(e, kwargs):
707
740
  request._ingest["end_to_end_latency_ms"] = duration
708
741
  await self._aingest_units(request._ingest)
@@ -746,10 +779,12 @@ class _PayiInstrumentor:
746
779
  kwargs=kwargs)
747
780
 
748
781
  if return_result:
782
+ self._logger.debug(f"async_invoke_wrapper: process sync response return")
749
783
  return return_result
750
784
 
751
785
  await self._aingest_units(request._ingest)
752
786
 
787
+ self._logger.debug(f"async_invoke_wrapper: finished")
753
788
  return response
754
789
 
755
790
  def invoke_wrapper(
@@ -761,13 +796,17 @@ class _PayiInstrumentor:
761
796
  args: Sequence[Any],
762
797
  kwargs: 'dict[str, Any]',
763
798
  ) -> Any:
799
+ self._logger.debug(f"invoke_wrapper: instance {instance}, category {request._category}")
800
+
764
801
  context = self.get_context()
765
802
 
766
803
  if not context:
767
804
  if request.is_bedrock():
768
805
  # boto3 doesn't allow extra_headers
769
806
  kwargs.pop("extra_headers", None)
770
-
807
+
808
+ self._logger.debug(f"invoke_wrapper: no instrumentation context, exit early")
809
+
771
810
  # wrapped function invoked outside of decorator scope
772
811
  return wrapped(*args, **kwargs)
773
812
 
@@ -785,6 +824,8 @@ class _PayiInstrumentor:
785
824
  # assumes anthropic and openai clients
786
825
  kwargs["extra_headers"] = extra_headers
787
826
 
827
+ self._logger.debug(f"invoke_wrapper: sending proxy request")
828
+
788
829
  return wrapped(*args, **kwargs)
789
830
 
790
831
  current_frame = inspect.currentframe()
@@ -794,6 +835,7 @@ class _PayiInstrumentor:
794
835
  request._ingest['properties'] = { 'system.stack_trace': json.dumps(stack) }
795
836
 
796
837
  if request.process_request(instance, extra_headers, args, kwargs) is False:
838
+ self._logger.debug(f"invoke_wrapper: calling wrapped instance")
797
839
  return wrapped(*args, **kwargs)
798
840
 
799
841
  sw = Stopwatch()
@@ -808,6 +850,8 @@ class _PayiInstrumentor:
808
850
 
809
851
  try:
810
852
  self._prepare_ingest(request, extra_headers, args, kwargs)
853
+ self._logger.debug(f"invoke_wrapper: calling wrapped instance (stream={stream})")
854
+
811
855
  sw.start()
812
856
  response = wrapped(*args, **kwargs)
813
857
 
@@ -815,6 +859,8 @@ class _PayiInstrumentor:
815
859
  sw.stop()
816
860
  duration = sw.elapsed_ms_int()
817
861
 
862
+ self._logger.debug(f"invoke_wrapper: calling wrapped instance exception {e}")
863
+
818
864
  if request.process_exception(e, kwargs):
819
865
  request._ingest["end_to_end_latency_ms"] = duration
820
866
  self._ingest_units(request._ingest)
@@ -867,10 +913,12 @@ class _PayiInstrumentor:
867
913
  log_prompt_and_response=self._log_prompt_and_response,
868
914
  kwargs=kwargs)
869
915
  if return_result:
916
+ self._logger.debug(f"invoke_wrapper: process sync response return")
870
917
  return return_result
871
918
 
872
919
  self._ingest_units(request._ingest)
873
920
 
921
+ self._logger.debug(f"invoke_wrapper: finished")
874
922
  return response
875
923
 
876
924
  def _create_extra_headers(
@@ -1029,6 +1077,8 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1029
1077
  request: _ProviderRequest,
1030
1078
  ) -> None:
1031
1079
 
1080
+ instrumentor._logger.debug(f"StreamIteratorWrapper: instance {instance}, category {request._category}")
1081
+
1032
1082
  bedrock_from_stream: bool = False
1033
1083
  if request.is_bedrock():
1034
1084
  request._ingest["provider_response_id"] = response["ResponseMetadata"]["RequestId"]
@@ -1057,21 +1107,28 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1057
1107
  self._bedrock_from_stream: bool = bedrock_from_stream
1058
1108
 
1059
1109
  def __enter__(self) -> Any:
1110
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __enter__")
1060
1111
  return self
1061
1112
 
1062
1113
  def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
1114
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __exit__")
1063
1115
  self.__wrapped__.__exit__(exc_type, exc_val, exc_tb) # type: ignore
1064
1116
 
1065
1117
  async def __aenter__(self) -> Any:
1118
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __aenter__")
1066
1119
  return self
1067
1120
 
1068
1121
  async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
1122
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __aexit__")
1069
1123
  await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb) # type: ignore
1070
1124
 
1071
1125
  def __iter__(self) -> Any:
1072
1126
  if self._is_bedrock:
1073
1127
  # MUST reside in a separate function so that the yield statement (e.g. the generator) doesn't implicitly return its own iterator and overriding self
1128
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: bedrock __iter__")
1074
1129
  return self._iter_bedrock()
1130
+
1131
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __iter__")
1075
1132
  return self
1076
1133
 
1077
1134
  def _iter_bedrock(self) -> Any:
@@ -1086,9 +1143,12 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1086
1143
  self._evaluate_chunk(decode)
1087
1144
  yield event
1088
1145
 
1146
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: bedrock iter finished")
1147
+
1089
1148
  self._stop_iteration()
1090
1149
 
1091
1150
  def __aiter__(self) -> Any:
1151
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __aiter__")
1092
1152
  return self
1093
1153
 
1094
1154
  def __next__(self) -> object:
@@ -1097,6 +1157,8 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1097
1157
  except Exception as e:
1098
1158
  if isinstance(e, StopIteration):
1099
1159
  self._stop_iteration()
1160
+ else:
1161
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __next__ exception {e}")
1100
1162
  raise e
1101
1163
  else:
1102
1164
  if self._evaluate_chunk(chunk) == False:
@@ -1110,6 +1172,8 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1110
1172
  except Exception as e:
1111
1173
  if isinstance(e, StopAsyncIteration):
1112
1174
  await self._astop_iteration()
1175
+ else:
1176
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: __anext__ exception {e}")
1113
1177
  raise e
1114
1178
  else:
1115
1179
  if self._evaluate_chunk(chunk) == False:
@@ -1128,6 +1192,8 @@ class _StreamIteratorWrapper(ObjectProxy): # type: ignore
1128
1192
  return self._request.process_chunk(chunk)
1129
1193
 
1130
1194
  def _process_stop_iteration(self) -> None:
1195
+ self._instrumentor._logger.debug(f"StreamIteratorWrapper: stop iteration")
1196
+
1131
1197
  self._stopwatch.stop()
1132
1198
  self._request._ingest["end_to_end_latency_ms"] = self._stopwatch.elapsed_ms_int()
1133
1199
  self._request._ingest["http_status_code"] = 200
@@ -1164,6 +1230,8 @@ class _StreamManagerWrapper(ObjectProxy): # type: ignore
1164
1230
  stopwatch: Stopwatch,
1165
1231
  request: _ProviderRequest,
1166
1232
  ) -> None:
1233
+ instrumentor._logger.debug(f"StreamManagerWrapper: instance {instance}, category {request._category}")
1234
+
1167
1235
  super().__init__(stream_manager) # type: ignore
1168
1236
 
1169
1237
  self._stream_manager = stream_manager
@@ -1176,6 +1244,8 @@ class _StreamManagerWrapper(ObjectProxy): # type: ignore
1176
1244
  self._done: bool = False
1177
1245
 
1178
1246
  def __enter__(self) -> _StreamIteratorWrapper:
1247
+ self._instrumentor._logger.debug(f"_StreamManagerWrapper: __enter__")
1248
+
1179
1249
  return _StreamIteratorWrapper(
1180
1250
  response=self.__wrapped__.__enter__(), # type: ignore
1181
1251
  instance=self._instance,
@@ -1193,6 +1263,8 @@ class _GeneratorWrapper: # type: ignore
1193
1263
  stopwatch: Stopwatch,
1194
1264
  request: _ProviderRequest,
1195
1265
  ) -> None:
1266
+ instrumentor._logger.debug(f"GeneratorWrapper: instance {instance}, category {request._category}")
1267
+
1196
1268
  super().__init__() # type: ignore
1197
1269
 
1198
1270
  self._generator = generator
@@ -1206,9 +1278,11 @@ class _GeneratorWrapper: # type: ignore
1206
1278
  self._done: bool = False
1207
1279
 
1208
1280
  def __iter__(self) -> Any:
1281
+ self._instrumentor._logger.debug(f"GeneratorWrapper: __iter__")
1209
1282
  return self
1210
1283
 
1211
1284
  def __aiter__(self) -> Any:
1285
+ self._instrumentor._logger.debug(f"GeneratorWrapper: __aiter__")
1212
1286
  return self
1213
1287
 
1214
1288
  def __next__(self) -> Any:
@@ -1219,9 +1293,12 @@ class _GeneratorWrapper: # type: ignore
1219
1293
  chunk = next(self._generator)
1220
1294
  return self._process_chunk(chunk)
1221
1295
 
1222
- except StopIteration as stop_exception:
1223
- self._process_stop_iteration()
1224
- raise stop_exception
1296
+ except Exception as e:
1297
+ if isinstance(e, StopIteration):
1298
+ self._process_stop_iteration()
1299
+ else:
1300
+ self._instrumentor._logger.debug(f"GeneratorWrapper: __next__ exception {e}")
1301
+ raise e
1225
1302
 
1226
1303
  async def __anext__(self) -> Any:
1227
1304
  if self._done:
@@ -1231,9 +1308,12 @@ class _GeneratorWrapper: # type: ignore
1231
1308
  chunk = await anext(self._generator) # type: ignore
1232
1309
  return self._process_chunk(chunk)
1233
1310
 
1234
- except StopAsyncIteration as stop_exception:
1235
- await self._process_async_stop_iteration()
1236
- raise stop_exception
1311
+ except Exception as e:
1312
+ if isinstance(e, StopAsyncIteration):
1313
+ await self._process_async_stop_iteration()
1314
+ else:
1315
+ self._instrumentor._logger.debug(f"GeneratorWrapper: __anext__ exception {e}")
1316
+ raise e
1237
1317
 
1238
1318
  @staticmethod
1239
1319
  def _chunk_to_dict(chunk: Any) -> 'dict[str, object]':
@@ -1257,6 +1337,8 @@ class _GeneratorWrapper: # type: ignore
1257
1337
  return chunk
1258
1338
 
1259
1339
  def _process_stop_iteration(self) -> None:
1340
+ self._instrumentor._logger.debug(f"GeneratorWrapper: stop iteration")
1341
+
1260
1342
  self._stopwatch.stop()
1261
1343
  self._request._ingest["end_to_end_latency_ms"] = self._stopwatch.elapsed_ms_int()
1262
1344
  self._request._ingest["http_status_code"] = 200
@@ -1268,6 +1350,8 @@ class _GeneratorWrapper: # type: ignore
1268
1350
  self._done = True
1269
1351
 
1270
1352
  async def _process_async_stop_iteration(self) -> None:
1353
+ self._instrumentor._logger.debug(f"GeneratorWrapper: async stop iteration")
1354
+
1271
1355
  self._stopwatch.stop()
1272
1356
  self._request._ingest["end_to_end_latency_ms"] = self._stopwatch.elapsed_ms_int()
1273
1357
  self._request._ingest["http_status_code"] = 200
@@ -1287,6 +1371,7 @@ def payi_instrument(
1287
1371
  log_prompt_and_response: bool = True,
1288
1372
  prompt_and_response_logger: Optional[Callable[[str, "dict[str, str]"], None]] = None,
1289
1373
  config: Optional[PayiInstrumentConfig] = None,
1374
+ logger: Optional[logging.Logger] = None,
1290
1375
  ) -> None:
1291
1376
  global _instrumentor
1292
1377
  if (_instrumentor):
@@ -1316,6 +1401,7 @@ def payi_instrument(
1316
1401
  apayi=apayi_param,
1317
1402
  instruments=instruments,
1318
1403
  log_prompt_and_response=log_prompt_and_response,
1404
+ logger=logger,
1319
1405
  prompt_and_response_logger=prompt_and_response_logger,
1320
1406
  global_config=config if config else PayiInstrumentConfig(),
1321
1407
  caller_filename=caller_filename
@@ -1335,7 +1421,10 @@ def track(
1335
1421
  if asyncio.iscoroutinefunction(func):
1336
1422
  async def awrapper(*args: Any, **kwargs: Any) -> Any:
1337
1423
  if not _instrumentor:
1424
+ _g_logger.debug(f"track: no instrumentor!")
1338
1425
  return await func(*args, **kwargs)
1426
+
1427
+ _instrumentor._logger.debug(f"track: call async function (proxy={proxy}, limit_ids={limit_ids}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1339
1428
  # Call the instrumentor's _call_func for async functions
1340
1429
  return await _instrumentor._acall_func(
1341
1430
  func,
@@ -1354,7 +1443,11 @@ def track(
1354
1443
  else:
1355
1444
  def wrapper(*args: Any, **kwargs: Any) -> Any:
1356
1445
  if not _instrumentor:
1446
+ _g_logger.debug(f"track: no instrumentor!")
1357
1447
  return func(*args, **kwargs)
1448
+
1449
+ _instrumentor._logger.debug(f"track: call sync function (proxy={proxy}, limit_ids={limit_ids}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1450
+
1358
1451
  return _instrumentor._call_func(
1359
1452
  func,
1360
1453
  proxy,
@@ -1427,7 +1520,11 @@ def ingest(
1427
1520
  if asyncio.iscoroutinefunction(func):
1428
1521
  async def awrapper(*args: Any, **kwargs: Any) -> Any:
1429
1522
  if not _instrumentor:
1523
+ _g_logger.debug(f"ingest: call no instrumentor!")
1430
1524
  return await func(*args, **kwargs)
1525
+
1526
+ _instrumentor._logger.debug(f"ingest: call async function (limit_ids={limit_ids}, experience_name={experience_name}, experience_id={experience_id}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1527
+
1431
1528
  # Call the instrumentor's _call_func for async functions
1432
1529
  return await _instrumentor._acall_func(
1433
1530
  func,
@@ -1446,7 +1543,11 @@ def ingest(
1446
1543
  else:
1447
1544
  def wrapper(*args: Any, **kwargs: Any) -> Any:
1448
1545
  if not _instrumentor:
1546
+ _g_logger.debug(f"ingest: call no instrumentor!")
1449
1547
  return func(*args, **kwargs)
1548
+
1549
+ _instrumentor._logger.debug(f"ingest: call sync function (limit_ids={limit_ids}, experience_name={experience_name}, experience_id={experience_id}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1550
+
1450
1551
  return _instrumentor._call_func(
1451
1552
  func,
1452
1553
  False,
@@ -1478,7 +1579,6 @@ def proxy(
1478
1579
  "@proxy is deprecated and will be removed in a future version. Use @track instead.",
1479
1580
  DeprecationWarning,
1480
1581
  stacklevel=2
1481
-
1482
1582
  )
1483
1583
 
1484
1584
  def _proxy(func: Any) -> Any:
@@ -1486,7 +1586,11 @@ def proxy(
1486
1586
  if asyncio.iscoroutinefunction(func):
1487
1587
  async def _proxy_awrapper(*args: Any, **kwargs: Any) -> Any:
1488
1588
  if not _instrumentor:
1589
+ _g_logger.debug(f"proxy: call no instrumentor!")
1489
1590
  return await func(*args, **kwargs)
1591
+
1592
+ _instrumentor._logger.debug(f"proxy: call async function (limit_ids={limit_ids}, experience_name={experience_name}, experience_id={experience_id}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1593
+
1490
1594
  return await _instrumentor._call_func(
1491
1595
  func,
1492
1596
  True,
@@ -1505,7 +1609,11 @@ def proxy(
1505
1609
  else:
1506
1610
  def _proxy_wrapper(*args: Any, **kwargs: Any) -> Any:
1507
1611
  if not _instrumentor:
1612
+ _g_logger.debug(f"proxy: call no instrumentor!")
1508
1613
  return func(*args, **kwargs)
1614
+
1615
+ _instrumentor._logger.debug(f"proxy: call sync function (limit_ids={limit_ids}, experience_name={experience_name}, experience_id={experience_id}, use_case_name={use_case_name}, use_case_id={use_case_id}, use_case_version={use_case_version}, user_id={user_id})")
1616
+
1509
1617
  return _instrumentor._call_func(
1510
1618
  func,
1511
1619
  True,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: payi
3
- Version: 0.1.0a80
3
+ Version: 0.1.0a82
4
4
  Summary: The official Python library for the payi API
5
5
  Project-URL: Homepage, https://github.com/Pay-i/pay-i-python
6
6
  Project-URL: Repository, https://github.com/Pay-i/pay-i-python
@@ -11,7 +11,7 @@ payi/_resource.py,sha256=j2jIkTr8OIC8sU6-05nxSaCyj4MaFlbZrwlyg4_xJos,1088
11
11
  payi/_response.py,sha256=rh9oJAvCKcPwQFm4iqH_iVrmK8bNx--YP_A2a4kN1OU,28776
12
12
  payi/_streaming.py,sha256=Z_wIyo206T6Jqh2rolFg2VXZgX24PahLmpURp0-NssU,10092
13
13
  payi/_types.py,sha256=7jE5MoQQFVoVxw5vVzvZ2Ao0kcjfNOGsBgyJfLBEnMo,6195
14
- payi/_version.py,sha256=8owWzJCmVOUMtN_Md7Vwk-OBmbGW5fhSMc2EUWFK3X8,165
14
+ payi/_version.py,sha256=KAmqXUJQtR0NFE11hO5_6a9OQxaHRxsEAfoNmO8-neY,165
15
15
  payi/pagination.py,sha256=k2356QGPOUSjRF2vHpwLBdF6P-2vnQzFfRIJQAHGQ7A,1258
16
16
  payi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
17
  payi/_utils/__init__.py,sha256=PNZ_QJuzZEgyYXqkO1HVhGkj5IU9bglVUcw7H-Knjzw,2062
@@ -25,14 +25,14 @@ payi/_utils/_transform.py,sha256=n7kskEWz6o__aoNvhFoGVyDoalNe6mJwp-g7BWkdj88,156
25
25
  payi/_utils/_typing.py,sha256=D0DbbNu8GnYQTSICnTSHDGsYXj8TcAKyhejb0XcnjtY,4602
26
26
  payi/_utils/_utils.py,sha256=ts4CiiuNpFiGB6YMdkQRh2SZvYvsl7mAF-JWHCcLDf4,12312
27
27
  payi/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
28
- payi/lib/AnthropicInstrumentor.py,sha256=Y7W99dwnaAM4W5IIkmeGLaTunoMsBDt-AjUasrI1mXM,8900
29
- payi/lib/BedrockInstrumentor.py,sha256=cIjHlPQmVEAixabUoT3mV8h50lfzdc1u8iLj0gQNukE,14076
30
- payi/lib/GoogleGenAiInstrumentor.py,sha256=isfuYH0y9LZZPcjPkynRQzwhtsN9H2p4DqtPYTm8hqI,13549
31
- payi/lib/OpenAIInstrumentor.py,sha256=6h5MYtj4jAXEJZ_IyMriZxWXrN-hQ7SdJXIQW7DrTFo,18014
28
+ payi/lib/AnthropicInstrumentor.py,sha256=PNSrEsij-rRaRN0_rjqQI00NK_FahrrRN4gikpDiiTc,9186
29
+ payi/lib/BedrockInstrumentor.py,sha256=qXkrYeYHba3gOnp_VnQ6sMAUVLx0RpeSJ8gMWz7-A2g,15136
30
+ payi/lib/GoogleGenAiInstrumentor.py,sha256=WE_3tyrp96UDHXymY4ky28wtFTROF-v5mSzOP2XuGxw,14489
31
+ payi/lib/OpenAIInstrumentor.py,sha256=hrEEzzeGtQmZfLxGsyMKhDtB0S7YPNfsjgMG5vx5jMA,18485
32
32
  payi/lib/Stopwatch.py,sha256=7OJlxvr2Jyb6Zr1LYCYKczRB7rDVKkIR7gc4YoleNdE,764
33
- payi/lib/VertexInstrumentor.py,sha256=aW4ZT7YVXy8V_g91KkSEapzI7Cy6UQfaU1D0ZOU4eBE,11886
33
+ payi/lib/VertexInstrumentor.py,sha256=E0511pzzB5e3xY7xNSq_gn2BERnnWRPWkOx4tyqvQ3A,12779
34
34
  payi/lib/helpers.py,sha256=K1KAfWrpPT1UUGNxspLe1lHzQjP3XV5Pkh9IU4pKMok,4624
35
- payi/lib/instrument.py,sha256=UpZ6SGg3YI9lSxmwH5ziwe1xt_ca6FS35CcNJpG9ONM,58214
35
+ payi/lib/instrument.py,sha256=w55jtWm6PoIJqAPEeRJXWc41QTuUxzgQB9BxIdFYMdU,64622
36
36
  payi/resources/__init__.py,sha256=1rtrPLWbNt8oJGOp6nwPumKLJ-ftez0B6qwLFyfcoP4,2972
37
37
  payi/resources/ingest.py,sha256=8HNHEyfgIyJNqCh0rOhO9msoc61-8IyifJ6AbxjCrDg,22612
38
38
  payi/resources/categories/__init__.py,sha256=w5gMiPdBSzJA_qfoVtFBElaoe8wGf_O63R7R1Spr6Gk,1093
@@ -142,7 +142,7 @@ payi/types/use_cases/definitions/kpi_retrieve_response.py,sha256=uQXliSvS3k-yDYw
142
142
  payi/types/use_cases/definitions/kpi_update_params.py,sha256=jbawdWAdMnsTWVH0qfQGb8W7_TXe3lq4zjSRu44d8p8,373
143
143
  payi/types/use_cases/definitions/kpi_update_response.py,sha256=zLyEoT0S8d7XHsnXZYT8tM7yDw0Aze0Mk-_Z6QeMtc8,459
144
144
  payi/types/use_cases/definitions/limit_config_create_params.py,sha256=pzQza_16N3z8cFNEKr6gPbFvuGFrwNuGxAYb--Kbo2M,449
145
- payi-0.1.0a80.dist-info/METADATA,sha256=__22ITl_q65jasQ663-ur0WOvHXW-PYzIVu0tq6PnYs,15180
146
- payi-0.1.0a80.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
147
- payi-0.1.0a80.dist-info/licenses/LICENSE,sha256=CQt03aM-P4a3Yg5qBg3JSLVoQS3smMyvx7tYg_6V7Gk,11334
148
- payi-0.1.0a80.dist-info/RECORD,,
145
+ payi-0.1.0a82.dist-info/METADATA,sha256=mD2GWPPSx1-aeLthkLrXEp3Ng4-TBjMN6mDMIljfxkc,15180
146
+ payi-0.1.0a82.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
147
+ payi-0.1.0a82.dist-info/licenses/LICENSE,sha256=CQt03aM-P4a3Yg5qBg3JSLVoQS3smMyvx7tYg_6V7Gk,11334
148
+ payi-0.1.0a82.dist-info/RECORD,,