payi 0.1.0a35__py3-none-any.whl → 0.1.0a37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of payi might be problematic.

payi/lib/instrument.py ADDED
@@ -0,0 +1,522 @@
+ import json
+ import uuid
+ import asyncio
+ import inspect
+ import logging
+ import traceback
+ from typing import Any, Set, Union, Callable, Optional
+
+ from wrapt import ObjectProxy # type: ignore
+
+ from payi import Payi, AsyncPayi
+ from payi.types import IngestUnitsParams
+
+ from .Stopwatch import Stopwatch
+ from .Instruments import Instruments
+
+
+ class PayiInstrumentor:
+     def __init__(
+         self,
+         payi: Union[Payi, AsyncPayi, None] = None,
+         instruments: Union[Set[Instruments], None] = None,
+         log_prompt_and_response: bool = True,
+         prompt_and_response_logger: Optional[
+             Callable[[str, "dict[str, str]"], None]
+         ] = None, # (request id, dict of data to store) -> None
+     ):
+         self._payi: Union[Payi, AsyncPayi, None] = payi
+         self._context_stack: list[dict[str, Any]] = [] # Stack of context dictionaries
+         self._log_prompt_and_response: bool = log_prompt_and_response
+         self._prompt_and_response_logger: Optional[Callable[[str, dict[str, str]], None]] = prompt_and_response_logger
+
+         self._blocked_limits: set[str] = set()
+         self._exceeded_limits: set[str] = set()
+
+         if instruments is None or Instruments.ALL in instruments:
+             self._instrument_all()
+         else:
+             self._instrument_specific(instruments)
+
+     def _instrument_all(self) -> None:
+         self._instrument_openai()
+         self._instrument_anthropic()
+
+     def _instrument_specific(self, instruments: Set[Instruments]) -> None:
+         if Instruments.OPENAI in instruments:
+             self._instrument_openai()
+         if Instruments.ANTHROPIC in instruments:
+             self._instrument_anthropic()
+
+     def _instrument_openai(self) -> None:
+         from .OpenAIInstrumentor import OpenAiInstrumentor
+
+         try:
+             OpenAiInstrumentor.instrument(self)
+
+         except Exception as e:
+             logging.error(f"Error instrumenting OpenAI: {e}")
+
+     def _instrument_anthropic(self) -> None:
+         from .AnthropicInstrumentor import AnthropicIntrumentor
+
+         try:
+             AnthropicIntrumentor.instrument(self)
+
+         except Exception as e:
+             logging.error(f"Error instrumenting Anthropic: {e}")
+
+     def _ingest_units(self, ingest_units: IngestUnitsParams) -> None:
+         # on a successful ingest request, return early if there are no units to ingest
+         if int(ingest_units.get("http_status_code") or 0) < 400:
+             units = ingest_units.get("units", {})
+             if not units or all(unit.get("input", 0) == 0 and unit.get("output", 0) == 0 for unit in units.values()):
+                 logging.error(
+                     'No units to ingest. For OpenAI streaming calls, make sure you pass stream_options={"include_usage": True}'
+                 )
+                 return
+
+         try:
+             if isinstance(self._payi, AsyncPayi):
+                 loop = asyncio.new_event_loop()
+                 asyncio.set_event_loop(loop)
+                 try:
+                     ingest_result = loop.run_until_complete(self._payi.ingest.units(**ingest_units))
+                 finally:
+                     loop.close()
+             elif isinstance(self._payi, Payi):
+                 ingest_result = self._payi.ingest.units(**ingest_units)
+             else:
+                 logging.error("No payi instance to ingest units")
+                 return
+
+             if ingest_result.xproxy_result.limits:
+                 for limit_id, state in ingest_result.xproxy_result.limits.items():
+                     removeBlockedId: bool = False
+
+                     if state.state == "blocked":
+                         self._blocked_limits.add(limit_id)
+                     elif state.state == "exceeded":
+                         self._exceeded_limits.add(limit_id)
+                         removeBlockedId = True
+                     elif state.state == "ok":
+                         removeBlockedId = True
+
+                     # opportunistically remove blocked limits
+                     if removeBlockedId:
+                         self._blocked_limits.discard(limit_id)
+
+             if self._log_prompt_and_response and self._prompt_and_response_logger:
+                 request_id = ingest_result.xproxy_result.request_id
+
+                 log_data = {}
+                 response_json = ingest_units.pop("provider_response_json", None)
+                 request_json = ingest_units.pop("provider_request_json", None)
+                 stack_trace = ingest_units.get("properties", {}).pop("system.stack_trace", None) # type: ignore
+
+                 if response_json is not None:
+                     # response_json is a list of strings, convert to a single json string
+                     log_data["provider_response_json"] = json.dumps(response_json)
+                 if request_json is not None:
+                     log_data["provider_request_json"] = request_json
+                 if stack_trace is not None:
+                     log_data["stack_trace"] = stack_trace
+
+                 self._prompt_and_response_logger(request_id, log_data) # type: ignore
+
+         except Exception as e:
+             logging.error(f"Error Pay-i ingesting result: {e}")
+
+     def _call_func(
+         self,
+         func: Any,
+         proxy: bool,
+         limit_ids: Optional["list[str]"],
+         request_tags: Optional["list[str]"],
+         experience_name: Optional[str],
+         experience_id: Optional[str],
+         user_id: Optional[str],
+         *args: Any,
+         **kwargs: Any,
+     ) -> Any:
+         if len(self._context_stack) > 0:
+             # copy current context into the upcoming context
+             context = self._context_stack[-1].copy()
+             context.pop("proxy", None)
+             previous_experience_name = context["experience_name"]
+             previous_experience_id = context["experience_id"]
+         else:
+             context = {}
+             previous_experience_name = None
+             previous_experience_id = None
+
+         with self:
+             context["proxy"] = proxy
+
+             # Handle experience name and ID logic
+             if not experience_name:
+                 # If no experience_name specified, use previous values
+                 context["experience_name"] = previous_experience_name
+                 context["experience_id"] = previous_experience_id
+             else:
+                 # If experience_name is specified
+                 if experience_name == previous_experience_name:
+                     # Same experience name, use previous ID unless new one specified
+                     context["experience_name"] = experience_name
+                     context["experience_id"] = experience_id if experience_id else previous_experience_id
+                 else:
+                     # Different experience name, use specified ID or generate one
+                     context["experience_name"] = experience_name
+                     context["experience_id"] = experience_id if experience_id else str(uuid.uuid4())
+
+             # set any values explicitly passed by the caller, otherwise use what is already in the context
+             if limit_ids:
+                 context["limit_ids"] = limit_ids
+             if request_tags:
+                 context["request_tags"] = request_tags
+             if user_id:
+                 context["user_id"] = user_id
+
+             self.set_context(context)
+
+             return func(*args, **kwargs)
+
+     def __enter__(self) -> Any:
+         # Push a new context dictionary onto the stack
+         self._context_stack.append({})
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         # Pop the current context off the stack
+         if self._context_stack:
+             self._context_stack.pop()
+
+     def set_context(self, context: "dict[str, Any]") -> None:
+         # Update the current top of the stack with the provided context
+         if self._context_stack:
+             self._context_stack[-1].update(context)
+
+     def get_context(self) -> Optional["dict[str, Any]"]:
+         # Return the current top of the stack
+         return self._context_stack[-1] if self._context_stack else None
+
+     def chat_wrapper(
+         self,
+         category: str,
+         process_chunk: Callable[[Any, IngestUnitsParams], None],
+         process_synchronous_response: Optional[Callable[[Any, IngestUnitsParams, bool], None]],
+         wrapped: Any,
+         instance: Any,
+         args: Any,
+         kwargs: Any,
+     ) -> Any:
+         context = self.get_context()
+
+         if not context:
+             # should not happen
+             return wrapped(*args, **kwargs)
+
+         # after _update_headers, all metadata to add to ingest is in extra_headers, keyed by the xproxy-xxx header name
+         extra_headers = kwargs.get("extra_headers", {})
+         self._update_headers(context, extra_headers)
+
+         if context.get("proxy", True):
+             if "extra_headers" not in kwargs:
+                 kwargs["extra_headers"] = extra_headers
+
+             return wrapped(*args, **kwargs)
+
+         ingest: IngestUnitsParams = {"category": category, "resource": kwargs.get("model"), "units": {}}
+
+         # blocked_limit = next((limit for limit in (context.get('limit_ids') or []) if limit in self._blocked_limits), None)
+         # if blocked_limit:
+         #     raise Exception(f"Limit {blocked_limit} is blocked")
+         current_frame = inspect.currentframe()
+         # f_back excludes the current frame, strip() cleans up whitespace and newlines
+         stack = [frame.strip() for frame in traceback.format_stack(current_frame.f_back)] # type: ignore
+
+         # TODO add back once feature is in prod
+         # ingest['properties'] = { 'system.stack_trace': json.dumps(stack) }
+
+         sw = Stopwatch()
+         stream = kwargs.get("stream", False)
+
+         try:
+             limit_ids = extra_headers.pop("xProxy-Limit-IDs", None)
+             request_tags = extra_headers.pop("xProxy-Request-Tags", None)
+             experience_name = extra_headers.pop("xProxy-Experience-Name", None)
+             experience_id = extra_headers.pop("xProxy-Experience-ID", None)
+             user_id = extra_headers.pop("xProxy-User-ID", None)
+
+             if limit_ids:
+                 ingest["limit_ids"] = limit_ids.split(",")
+             if request_tags:
+                 ingest["request_tags"] = request_tags.split(",")
+             if experience_name:
+                 ingest["experience_name"] = experience_name
+             if experience_id:
+                 ingest["experience_id"] = experience_id
+             if user_id:
+                 ingest["user_id"] = user_id
+
+             if len(extra_headers) > 0:
+                 ingest["provider_request_headers"] = {k: [v] for k, v in extra_headers.items()} # type: ignore
+
+             provider_prompt = {}
+             for k, v in kwargs.items():
+                 if k == "messages":
+                     provider_prompt[k] = [m.model_dump() if hasattr(m, "model_dump") else m for m in v]
+                 elif k in ["extra_headers", "extra_query"]:
+                     pass
+                 else:
+                     provider_prompt[k] = v
+
+             if self._log_prompt_and_response:
+                 ingest["provider_request_json"] = json.dumps(provider_prompt)
+
+             sw.start()
+             response = wrapped(*args, **kwargs.copy())
+
+         except Exception as e: # pylint: disable=broad-except
+             sw.stop()
+             duration = sw.elapsed_ms_int()
+
+             # TODO ingest error
+
+             raise e
+
+         if stream:
+             return ChatStreamWrapper(
+                 response=response,
+                 instance=instance,
+                 instrumentor=self,
+                 log_prompt_and_response=self._log_prompt_and_response,
+                 ingest=ingest,
+                 stopwatch=sw,
+                 process_chunk=process_chunk,
+             )
+
+         sw.stop()
+         duration = sw.elapsed_ms_int()
+         ingest["end_to_end_latency_ms"] = duration
+         ingest["http_status_code"] = 200
+
+         if process_synchronous_response:
+             process_synchronous_response(response, ingest, self._log_prompt_and_response)
+
+         self._ingest_units(ingest)
+
+         return response
+
+     @staticmethod
+     def _update_headers(
+         context: "dict[str, Any]",
+         extra_headers: "dict[str, str]",
+     ) -> None:
+         limit_ids: Optional[list[str]] = context.get("limit_ids")
+         request_tags: Optional[list[str]] = context.get("request_tags")
+         experience_name: Optional[str] = context.get("experience_name")
+         experience_id: Optional[str] = context.get("experience_id")
+         user_id: Optional[str] = context.get("user_id")
+
+         # Merge limits from the decorator and extra headers
+         if limit_ids is not None:
+             existing_limit_ids = extra_headers.get("xProxy-Limit-IDs", None)
+
+             if not existing_limit_ids:
+                 extra_headers["xProxy-Limit-IDs"] = ",".join(limit_ids)
+             else:
+                 existing_ids = existing_limit_ids.split(',')
+                 combined_ids = list(set(existing_ids + limit_ids))
+                 extra_headers["xProxy-Limit-IDs"] = ",".join(combined_ids)
+
+         # Merge request tags from the decorator and extra headers
+         if request_tags is not None:
+             existing_request_tags = extra_headers.get("xProxy-Request-Tags", None)
+
+             if not existing_request_tags:
+                 extra_headers["xProxy-Request-Tags"] = ",".join(request_tags)
+             else:
+                 existing_tags = existing_request_tags.split(',')
+                 combined_tags = list(set(existing_tags + request_tags))
+                 extra_headers["xProxy-Request-Tags"] = ",".join(combined_tags)
+
+         # inner extra_headers user_id takes precedence over outer decorator user_id
+         if user_id is not None and extra_headers.get("xProxy-User-ID", None) is None:
+             extra_headers["xProxy-User-ID"] = user_id
+
+         # inner extra_headers experience_name and experience_id take precedence over outer decorator experience_name and experience_id
+         # if either inner value is specified, ignore outer decorator values
+         if extra_headers.get("xProxy-Experience-Name", None) is None and extra_headers.get("xProxy-Experience-ID", None) is None:
+             if experience_name is not None:
+                 extra_headers["xProxy-Experience-Name"] = experience_name
+
+             if experience_id is not None:
+                 extra_headers["xProxy-Experience-ID"] = experience_id
+
+     @staticmethod
+     def payi_wrapper(func: Any) -> Any:
+         def _payi_wrapper(o: Any) -> Any:
+             def wrapper(wrapped: Any, instance: Any, args: Any, kwargs: Any) -> Any:
+                 return func(
+                     o,
+                     wrapped,
+                     instance,
+                     args,
+                     kwargs,
+                 )
+
+             return wrapper
+
+         return _payi_wrapper
+
+
+ class ChatStreamWrapper(ObjectProxy): # type: ignore
+     def __init__(
+         self,
+         response: Any,
+         instance: Any,
+         instrumentor: PayiInstrumentor,
+         ingest: IngestUnitsParams,
+         stopwatch: Stopwatch,
+         process_chunk: Optional[Callable[[Any, IngestUnitsParams], None]] = None,
+         log_prompt_and_response: bool = True,
+     ) -> None:
+         super().__init__(response) # type: ignore
+
+         self._response = response
+         self._instance = instance
+
+         self._instrumentor = instrumentor
+         self._stopwatch: Stopwatch = stopwatch
+         self._ingest: IngestUnitsParams = ingest
+         self._log_prompt_and_response: bool = log_prompt_and_response
+         self._responses: list[str] = []
+
+         self._process_chunk: Optional[Callable[[Any, IngestUnitsParams], None]] = process_chunk
+
+         self._first_token: bool = True
+
+     def __enter__(self) -> Any:
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.__wrapped__.__exit__(exc_type, exc_val, exc_tb) # type: ignore
+
+     async def __aenter__(self) -> Any:
+         return self
+
+     async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb) # type: ignore
+
+     def __iter__(self) -> Any:
+         return self
+
+     def __aiter__(self) -> Any:
+         return self
+
+     def __next__(self) -> Any:
+         try:
+             chunk: Any = self.__wrapped__.__next__() # type: ignore
+         except Exception as e:
+             if isinstance(e, StopIteration):
+                 self._stop_iteration()
+             raise e
+         else:
+             self._evaluate_chunk(chunk)
+             return chunk
+
+     async def __anext__(self) -> Any:
+         try:
+             chunk: Any = await self.__wrapped__.__anext__() # type: ignore
+         except Exception as e:
+             if isinstance(e, StopAsyncIteration):
+                 self._stop_iteration()
+             raise e
+         else:
+             self._evaluate_chunk(chunk)
+             return chunk
+
+     def _evaluate_chunk(self, chunk: Any) -> None:
+         if self._first_token:
+             self._ingest["time_to_first_token_ms"] = self._stopwatch.elapsed_ms_int()
+             self._first_token = False
+
+         if self._log_prompt_and_response:
+             self._responses.append(chunk.to_json())
+
+         if self._process_chunk:
+             self._process_chunk(chunk, self._ingest)
+
+     def _stop_iteration(self) -> None:
+         self._stopwatch.stop()
+         self._ingest["end_to_end_latency_ms"] = self._stopwatch.elapsed_ms_int()
+         self._ingest["http_status_code"] = 200
+
+         if self._log_prompt_and_response:
+             self._ingest["provider_response_json"] = self._responses
+
+         self._instrumentor._ingest_units(self._ingest)
+
+
+ global _instrumentor
+ _instrumentor: PayiInstrumentor
+
+
+ def payi_instrument(
+     payi: Optional[Union[Payi, AsyncPayi]] = None,
+     instruments: Optional[Set[Instruments]] = None,
+     log_prompt_and_response: bool = True,
+     prompt_and_response_logger: Optional[Callable[[str, "dict[str, str]"], None]] = None,
+ ) -> None:
+     global _instrumentor
+     _instrumentor = PayiInstrumentor(
+         payi=payi,
+         instruments=instruments,
+         log_prompt_and_response=log_prompt_and_response,
+         prompt_and_response_logger=prompt_and_response_logger,
+     )
+
+
+ def ingest(
+     limit_ids: Optional["list[str]"] = None,
+     request_tags: Optional["list[str]"] = None,
+     experience_name: Optional[str] = None,
+     experience_id: Optional[str] = None,
+     user_id: Optional[str] = None,
+ ) -> Any:
+     def _ingest(func: Any) -> Any:
+         def _ingest_wrapper(*args: Any, **kwargs: Any) -> Any:
+             return _instrumentor._call_func(
+                 func,
+                 False, # false -> ingest
+                 limit_ids,
+                 request_tags,
+                 experience_name,
+                 experience_id,
+                 user_id,
+                 *args,
+                 **kwargs,
+             )
+
+         return _ingest_wrapper
+
+     return _ingest
+
+
+ def proxy(
+     limit_ids: Optional["list[str]"] = None,
+     request_tags: Optional["list[str]"] = None,
+     experience_name: Optional[str] = None,
+     experience_id: Optional[str] = None,
+     user_id: Optional[str] = None,
+ ) -> Any:
+     def _proxy(func: Any) -> Any:
+         def _proxy_wrapper(*args: Any, **kwargs: Any) -> Any:
+             return _instrumentor._call_func(
+                 func, True, limit_ids, request_tags, experience_name, experience_id, user_id, *args, **kwargs
+             )
+
+         return _proxy_wrapper
+
+     return _proxy
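
For context (not part of the diff): the new module is driven through payi_instrument() plus the ingest/proxy decorators defined above. A minimal usage sketch, assuming an OpenAI chat client is instrumented; the model name, limit ID, and experience name below are illustrative placeholders, not values shipped with the package:

from openai import OpenAI
from payi import Payi
from payi.lib.instrument import payi_instrument, ingest

# One-time setup: creates the global PayiInstrumentor and installs the
# OpenAI/Anthropic instrumentors (all instruments when none are specified).
payi_instrument(payi=Payi())

client = OpenAI()

# "ingest" mode (proxy=False): the wrapped call runs against the provider as
# usual, and usage is reported to Pay-i afterwards via ingest.units().
@ingest(limit_ids=["monthly-budget"], experience_name="support-bot")  # placeholder values
def answer(question: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": question}],
    )
    return response.choices[0].message.content or ""
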
@@ -32,7 +32,7 @@ class BillingModelsResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> BillingModelsResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -226,7 +226,7 @@ class AsyncBillingModelsResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncBillingModelsResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -38,7 +38,7 @@ class CategoriesResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> CategoriesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -184,7 +184,7 @@ class AsyncCategoriesResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncCategoriesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -32,7 +32,7 @@ class ResourcesResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> ResourcesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -218,7 +218,7 @@ class AsyncResourcesResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncResourcesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -47,7 +47,7 @@ class ExperiencesResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> ExperiencesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -175,7 +175,7 @@ class AsyncExperiencesResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncExperiencesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -30,7 +30,7 @@ class PropertiesResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> PropertiesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -86,7 +86,7 @@ class AsyncPropertiesResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncPropertiesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -31,7 +31,7 @@ class TypesResource(SyncAPIResource):
      @cached_property
      def with_raw_response(self) -> TypesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
@@ -238,7 +238,7 @@ class AsyncTypesResource(AsyncAPIResource):
      @cached_property
      def with_raw_response(self) -> AsyncTypesResourceWithRawResponse:
          """
-         This property can be used as a prefix for any HTTP method call to return the
+         This property can be used as a prefix for any HTTP method call to return
          the raw response object instead of the parsed content.

          For more information, see https://www.github.com/Pay-i/pay-i-python#accessing-raw-response-data-eg-headers
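
The remaining hunks only drop a duplicated "the" from the generated with_raw_response docstrings. For reference, the prefix pattern those docstrings describe looks roughly like the sketch below; the list() call on the categories resource is an assumed example method, so consult the linked README for the client's actual surface:

from payi import Payi

client = Payi()

# Prefixing a call with .with_raw_response returns the raw HTTP response
# (status code, headers) instead of the parsed content.
raw = client.categories.with_raw_response.list()  # assumed method name
print(raw.headers.get("x-request-id"))
categories = raw.parse()  # recover the parsed content when needed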