payi 0.1.0a35-py3-none-any.whl → 0.1.0a36-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of payi might be problematic.

payi/lib/instrument.py ADDED
@@ -0,0 +1,514 @@
+import json
+import uuid
+import asyncio
+import inspect
+import logging
+import traceback
+from typing import Any, Set, Union, Callable, Optional
+
+from wrapt import ObjectProxy  # type: ignore
+
+from payi import Payi, AsyncPayi
+from payi.types import IngestUnitsParams
+
+from .Stopwatch import Stopwatch
+from .Instruments import Instruments
+
+
+class PayiInstrumentor:
+    def __init__(
+        self,
+        payi: Union[Payi, AsyncPayi, None] = None,
+        instruments: Union[Set[Instruments], None] = None,
+        log_prompt_and_response: bool = True,
+        prompt_and_response_logger: Optional[
+            Callable[[str, "dict[str, str]"], None]
+        ] = None,  # (request id, dict of data to store) -> None
+    ):
+        self._payi: Union[Payi, AsyncPayi, None] = payi
+        self._context_stack: list[dict[str, Any]] = []  # Stack of context dictionaries
+        self._log_prompt_and_response: bool = log_prompt_and_response
+        self._prompt_and_response_logger: Optional[Callable[[str, dict[str, str]], None]] = prompt_and_response_logger
+
+        self._blocked_limits: set[str] = set()
+        self._exceeded_limits: set[str] = set()
+
+        if instruments is None or Instruments.ALL in instruments:
+            self._instrument_all()
+        else:
+            self._instrument_specific(instruments)
+
+    def _instrument_all(self) -> None:
+        self._instrument_openai()
+        self._instrument_anthropic()
+
+    def _instrument_specific(self, instruments: Set[Instruments]) -> None:
+        if Instruments.OPENAI in instruments:
+            self._instrument_openai()
+        if Instruments.ANTHROPIC in instruments:
+            self._instrument_anthropic()
+
+    def _instrument_openai(self) -> None:
+        from .OpenAIInstrumentor import OpenAiInstrumentor
+
+        try:
+            OpenAiInstrumentor.instrument(self)
+
+        except Exception as e:
+            logging.error(f"Error instrumenting OpenAI: {e}")
+
+    def _instrument_anthropic(self) -> None:
+        from .AnthropicInstrumentor import AnthropicIntrumentor
+
+        try:
+            AnthropicIntrumentor.instrument(self)
+
+        except Exception as e:
+            logging.error(f"Error instrumenting Anthropic: {e}")
+
+    def _ingest_units(self, ingest_units: IngestUnitsParams) -> None:
+        # return early if there are no units to ingest and on a successful ingest request
+        if int(ingest_units.get("http_status_code") or 0) < 400:
+            units = ingest_units.get("units", {})
+            if not units or all(unit.get("input", 0) == 0 and unit.get("output", 0) == 0 for unit in units.values()):
+                logging.error(
+                    'No units to ingest. For OpenAI streaming calls, make sure you pass stream_options={"include_usage": True}'
+                )
+                return
+
+        try:
+            if isinstance(self._payi, AsyncPayi):
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+                try:
+                    ingest_result = loop.run_until_complete(self._payi.ingest.units(**ingest_units))
+                finally:
+                    loop.close()
+            elif isinstance(self._payi, Payi):
+                ingest_result = self._payi.ingest.units(**ingest_units)
+            else:
+                logging.error("No payi instance to ingest units")
+                return
+
+            if ingest_result.xproxy_result.limits:
+                for limit_id, state in ingest_result.xproxy_result.limits.items():
+                    removeBlockedId: bool = False
+
+                    if state.state == "blocked":
+                        self._blocked_limits.add(limit_id)
+                    elif state.state == "exceeded":
+                        self._exceeded_limits.add(limit_id)
+                        removeBlockedId = True
+                    elif state.state == "ok":
+                        removeBlockedId = True
+
+                    # opportunistically remove blocked limits
+                    if removeBlockedId:
+                        self._blocked_limits.discard(limit_id)
+
+            if self._log_prompt_and_response and self._prompt_and_response_logger:
+                request_id = ingest_result.xproxy_result.request_id
+
+                log_data = {}
+                response_json = ingest_units.pop("provider_response_json", None)
+                request_json = ingest_units.pop("provider_request_json", None)
+                stack_trace = ingest_units.get("properties", {}).pop("system.stack_trace", None)  # type: ignore
+
+                if response_json is not None:
+                    # response_json is a list of strings, convert to a single json string
+                    log_data["provider_response_json"] = json.dumps(response_json)
+                if request_json is not None:
+                    log_data["provider_request_json"] = request_json
+                if stack_trace is not None:
+                    log_data["stack_trace"] = stack_trace
+
+                self._prompt_and_response_logger(request_id, log_data)  # type: ignore
+
+        except Exception as e:
+            logging.error(f"Error Pay-i ingesting result: {e}")
+
+    def _call_func(
+        self,
+        func: Any,
+        proxy: bool,
+        limit_ids: Optional["list[str]"],
+        request_tags: Optional["list[str]"],
+        experience_name: Optional[str],
+        experience_id: Optional[str],
+        user_id: Optional[str],
+        *args: Any,
+        **kwargs: Any,
+    ) -> Any:
+        if len(self._context_stack) > 0:
+            # copy current context into the upcoming context
+            context = self._context_stack[-1].copy()
+            context.pop("proxy", None)
+            previous_experience_name = context["experience_name"]
+            previous_experience_id = context["experience_id"]
+        else:
+            context = {}
+            previous_experience_name = None
+            previous_experience_id = None
+
+        with self:
+            context["proxy"] = proxy
+
+            # Handle experience name and ID logic
+            if not experience_name:
+                # If no experience_name specified, use previous values
+                context["experience_name"] = previous_experience_name
+                context["experience_id"] = previous_experience_id
+            else:
+                # If experience_name is specified
+                if experience_name == previous_experience_name:
+                    # Same experience name, use previous ID unless new one specified
+                    context["experience_name"] = experience_name
+                    context["experience_id"] = experience_id if experience_id else previous_experience_id
+                else:
+                    # Different experience name, use specified ID or generate one
+                    context["experience_name"] = experience_name
+                    context["experience_id"] = experience_id if experience_id else str(uuid.uuid4())
+
+            # set any values explicitly passed by the caller, otherwise use what is already in the context
+            if limit_ids:
+                context["limit_ids"] = limit_ids
+            if request_tags:
+                context["request_tags"] = request_tags
+            if user_id:
+                context["user_id"] = user_id
+
+            self.set_context(context)
+
+            return func(*args, **kwargs)
+
+    def __enter__(self) -> Any:
+        # Push a new context dictionary onto the stack
+        self._context_stack.append({})
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        # Pop the current context off the stack
+        if self._context_stack:
+            self._context_stack.pop()
+
+    def set_context(self, context: "dict[str, Any]") -> None:
+        # Update the current top of the stack with the provided context
+        if self._context_stack:
+            self._context_stack[-1].update(context)
+
+    def get_context(self) -> Optional["dict[str, Any]"]:
+        # Return the current top of the stack
+        return self._context_stack[-1] if self._context_stack else None
+
+    def chat_wrapper(
+        self,
+        category: str,
+        process_chunk: Callable[[Any, IngestUnitsParams], None],
+        process_synchronous_response: Optional[Callable[[Any, IngestUnitsParams, bool], None]],
+        wrapped: Any,
+        instance: Any,
+        args: Any,
+        kwargs: Any,
+    ) -> Any:
+        context = self.get_context()
+
+        if not context:
+            # should not happen
+            return wrapped(*args, **kwargs)
+
+        if context.get("proxy", True):
+            proxy_extra_headers = kwargs.get("extra_headers", {})
+
+            self._update_headers(context, proxy_extra_headers)
+
+            if "extra_headers" not in kwargs:
+                kwargs["extra_headers"] = proxy_extra_headers
+
+            return wrapped(*args, **kwargs)
+
+        ingest: IngestUnitsParams = {"category": category, "resource": kwargs.get("model"), "units": {}}
+
+        # blocked_limit = next((limit for limit in (context.get('limit_ids') or []) if limit in self._blocked_limits), None)
+        # if blocked_limit:
+        #     raise Exception(f"Limit {blocked_limit} is blocked")
+        current_frame = inspect.currentframe()
+        # f_back excludes the current frame, strip() cleans up whitespace and newlines
+        stack = [frame.strip() for frame in traceback.format_stack(current_frame.f_back)]  # type: ignore
+
+        # TODO add back once feature is in prod
+        # ingest['properties'] = { 'system.stack_trace': json.dumps(stack) }
+
+        sw = Stopwatch()
+        stream = kwargs.get("stream", False)
+
+        try:
+            limit_ids = context.get("limit_ids")
+            request_tags = context.get("request_tags")
+            experience_name = context.get("experience_name")
+            experience_id = context.get("experience_id")
+            user_id = context.get("user_id")
+
+            if limit_ids:
+                ingest["limit_ids"] = limit_ids
+            if request_tags:
+                ingest["request_tags"] = request_tags
+            if experience_name:
+                ingest["experience_name"] = experience_name
+            if experience_id:
+                ingest["experience_id"] = experience_id
+            if user_id:
+                ingest["user_id"] = user_id
+
+            extra_headers: dict[str, str] = kwargs.get("extra_headers") or {}
+
+            if len(extra_headers) > 0:
+                ingest["provider_request_headers"] = {k: [v] for k, v in extra_headers.items()}  # type: ignore
+
+            provider_prompt = {}
+            for k, v in kwargs.items():
+                if k == "messages":
+                    provider_prompt[k] = [m.model_dump() if hasattr(m, "model_dump") else m for m in v]
+                elif k in ["extra_headers", "extra_query"]:
+                    pass
+                else:
+                    provider_prompt[k] = v
+
+            if self._log_prompt_and_response:
+                ingest["provider_request_json"] = json.dumps(provider_prompt)
+
+            sw.start()
+            response = wrapped(*args, **kwargs.copy())
+
+        except Exception as e:  # pylint: disable=broad-except
+            sw.stop()
+            duration = sw.elapsed_ms_int()
+
+            # TODO ingest error
+
+            raise e
+
+        if stream:
+            return ChatStreamWrapper(
+                response=response,
+                instance=instance,
+                instrumentor=self,
+                log_prompt_and_response=self._log_prompt_and_response,
+                ingest=ingest,
+                stopwatch=sw,
+                process_chunk=process_chunk,
+            )
+
+        sw.stop()
+        duration = sw.elapsed_ms_int()
+        ingest["end_to_end_latency_ms"] = duration
+        ingest["http_status_code"] = 200
+
+        if process_synchronous_response:
+            process_synchronous_response(response, ingest, self._log_prompt_and_response)
+
+        self._ingest_units(ingest)
+
+        return response
+
+    @staticmethod
+    def _update_headers(
+        context: "dict[str, Any]",
+        extra_headers: "dict[str, str]",
+    ) -> None:
+        limit_ids: Optional[list[str]] = context.get("limit_ids")
+        request_tags: Optional[list[str]] = context.get("request_tags")
+        experience_name: Optional[str] = context.get("experience_name")
+        experience_id: Optional[str] = context.get("experience_id")
+        user_id: Optional[str] = context.get("user_id")
+
+        if limit_ids is not None:
+            existing_limit_ids = extra_headers.get("xProxy-Limit-IDs")
+            limit_ids_str = ",".join(limit_ids)
+            if existing_limit_ids is None:
+                extra_headers["xProxy-Limit-IDs"] = limit_ids_str
+            else:
+                extra_headers["xProxy-Limit-IDs"] = f"{existing_limit_ids},{limit_ids_str}"
+
+        if request_tags is not None:
+            existing_request_tags = extra_headers.get("xProxy-Request-Tags")
+            request_tags_str = ",".join(request_tags)
+            if existing_request_tags is None:
+                extra_headers["xProxy-Request-Tags"] = request_tags_str
+            else:
+                extra_headers["xProxy-Request-Tags"] = f"{existing_request_tags},{request_tags_str}"
+
+        if experience_name is not None:
+            extra_headers["xProxy-Experience-Name"] = experience_name
+
+        if experience_id is not None:
+            extra_headers["xProxy-Experience-ID"] = experience_id
+
+        if user_id is not None:
+            extra_headers["xProxy-User-ID"] = user_id
+
+    @staticmethod
+    def payi_wrapper(func: Any) -> Any:
+        def _payi_wrapper(o: Any) -> Any:
+            def wrapper(wrapped: Any, instance: Any, args: Any, kwargs: Any) -> Any:
+                return func(
+                    o,
+                    wrapped,
+                    instance,
+                    args,
+                    kwargs,
+                )
+
+            return wrapper
+
+        return _payi_wrapper
+
+
+class ChatStreamWrapper(ObjectProxy):  # type: ignore
+    def __init__(
+        self,
+        response: Any,
+        instance: Any,
+        instrumentor: PayiInstrumentor,
+        ingest: IngestUnitsParams,
+        stopwatch: Stopwatch,
+        process_chunk: Optional[Callable[[Any, IngestUnitsParams], None]] = None,
+        log_prompt_and_response: bool = True,
+    ) -> None:
+        super().__init__(response)  # type: ignore
+
+        self._response = response
+        self._instance = instance
+
+        self._instrumentor = instrumentor
+        self._stopwatch: Stopwatch = stopwatch
+        self._ingest: IngestUnitsParams = ingest
+        self._log_prompt_and_response: bool = log_prompt_and_response
+        self._responses: list[str] = []
+
+        self._process_chunk: Optional[Callable[[Any, IngestUnitsParams], None]] = process_chunk
+
+        self._first_token: bool = True
+
+    def __enter__(self) -> Any:
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)  # type: ignore
+
+    async def __aenter__(self) -> Any:
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)  # type: ignore
+
+    def __iter__(self) -> Any:
+        return self
+
+    def __aiter__(self) -> Any:
+        return self
+
+    def __next__(self) -> Any:
+        try:
+            chunk: Any = self.__wrapped__.__next__()  # type: ignore
+        except Exception as e:
+            if isinstance(e, StopIteration):
+                self._stop_iteration()
+            raise e
+        else:
+            self._evaluate_chunk(chunk)
+            return chunk
+
+    async def __anext__(self) -> Any:
+        try:
+            chunk: Any = await self.__wrapped__.__anext__()  # type: ignore
+        except Exception as e:
+            if isinstance(e, StopAsyncIteration):
+                self._stop_iteration()
+            raise e
+        else:
+            self._evaluate_chunk(chunk)
+            return chunk
+
+    def _evaluate_chunk(self, chunk: Any) -> None:
+        if self._first_token:
+            self._ingest["time_to_first_token_ms"] = self._stopwatch.elapsed_ms_int()
+            self._first_token = False
+
+        if self._log_prompt_and_response:
+            self._responses.append(chunk.to_json())
+
+        if self._process_chunk:
+            self._process_chunk(chunk, self._ingest)
+
+    def _stop_iteration(self) -> None:
+        self._stopwatch.stop()
+        self._ingest["end_to_end_latency_ms"] = self._stopwatch.elapsed_ms_int()
+        self._ingest["http_status_code"] = 200
+
+        if self._log_prompt_and_response:
+            self._ingest["provider_response_json"] = self._responses
+
+        self._instrumentor._ingest_units(self._ingest)
+
+
+global _instrumentor
+_instrumentor: PayiInstrumentor
+
+
+def payi_instrument(
+    payi: Optional[Union[Payi, AsyncPayi]] = None,
+    instruments: Optional[Set[Instruments]] = None,
+    log_prompt_and_response: bool = True,
+    prompt_and_response_logger: Optional[Callable[[str, "dict[str, str]"], None]] = None,
+) -> None:
+    global _instrumentor
+    _instrumentor = PayiInstrumentor(
+        payi=payi,
+        instruments=instruments,
+        log_prompt_and_response=log_prompt_and_response,
+        prompt_and_response_logger=prompt_and_response_logger,
+    )
+
+
+def ingest(
+    limit_ids: Optional["list[str]"] = None,
+    request_tags: Optional["list[str]"] = None,
+    experience_name: Optional[str] = None,
+    experience_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+) -> Any:
+    def _ingest(func: Any) -> Any:
+        def _ingest_wrapper(*args: Any, **kwargs: Any) -> Any:
+            return _instrumentor._call_func(
+                func,
+                False,  # false -> ingest
+                limit_ids,
+                request_tags,
+                experience_name,
+                experience_id,
+                user_id,
+                *args,
+                **kwargs,
+            )
+
+        return _ingest_wrapper
+
+    return _ingest
+
+
+def proxy(
+    limit_ids: Optional["list[str]"] = None,
+    request_tags: Optional["list[str]"] = None,
+    experience_name: Optional[str] = None,
+    experience_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+) -> Any:
+    def _proxy(func: Any) -> Any:
+        def _proxy_wrapper(*args: Any, **kwargs: Any) -> Any:
+            return _instrumentor._call_func(
+                func, True, limit_ids, request_tags, experience_name, experience_id, user_id, *args, **kwargs
+            )
+
+        return _proxy_wrapper
+
+    return _proxy
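
For reviewers, a minimal usage sketch of the entry point and decorators defined in this new module, based only on the signatures above. The OpenAI client, model name, limit ID, and experience name are illustrative assumptions, not part of this package:

from openai import OpenAI

from payi import Payi
from payi.lib.instrument import ingest, payi_instrument

# Instrument the supported provider SDKs (OpenAI and Anthropic by default) and
# hold on to a Payi client so wrapped calls can ingest their usage.
payi_instrument(payi=Payi())


@ingest(limit_ids=["example-limit"], experience_name="example-experience")  # illustrative values
def summarize(text: str) -> str:
    client = OpenAI()
    # chat_wrapper times this call and submits the resulting units via _ingest_units
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": f"Summarize: {text}"}],
    )
    return response.choices[0].message.content or ""

The @proxy decorator is used the same way; with it, chat_wrapper forwards limit IDs, request tags, experience, and user metadata as xProxy-* request headers instead of ingesting units after the call.
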
@@ -32,7 +32,7 @@ class BillingModelsResource(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> BillingModelsResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -226,7 +226,7 @@ class AsyncBillingModelsResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncBillingModelsResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -38,7 +38,7 @@ class CategoriesResource(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CategoriesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -184,7 +184,7 @@ class AsyncCategoriesResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCategoriesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -32,7 +32,7 @@ class ResourcesResource(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> ResourcesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -218,7 +218,7 @@ class AsyncResourcesResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncResourcesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -47,7 +47,7 @@ class ExperiencesResource(SyncAPIResource):
     @cached_property
    def with_raw_response(self) -> ExperiencesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -175,7 +175,7 @@ class AsyncExperiencesResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncExperiencesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -30,7 +30,7 @@ class PropertiesResource(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> PropertiesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -86,7 +86,7 @@ class AsyncPropertiesResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncPropertiesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -31,7 +31,7 @@ class TypesResource(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> TypesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -238,7 +238,7 @@ class AsyncTypesResource(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncTypesResourceWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the
+        This property can be used as a prefix for any HTTP method call to return
         the raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers