omnata-plugin-runtime 0.4.6a101.tar.gz → 0.4.6a103.tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omnata-plugin-runtime
-Version: 0.4.6a101
+Version: 0.4.6a103
 Summary: Classes and common runtime components for building and running Omnata Plugins
 Author: James Weakley
 Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "omnata-plugin-runtime"
-version = "0.4.6-a101"
+version = "0.4.6-a103"
 description = "Classes and common runtime components for building and running Omnata Plugins"
 authors = ["James Weakley <james.weakley@omnata.com>"]
 readme = "README.md"
@@ -21,6 +21,7 @@ import queue
 import threading
 import time
 import hashlib
+import requests
 from abc import ABC, abstractmethod
 from decimal import Decimal
 from functools import partial, wraps, reduce
@@ -263,6 +264,16 @@ class SyncRequest(ABC):
         Takes into account the run deadline and cancellation status.
         This is an alternative which can be used when the target API does not publish specific rate limits, and instead just asks you to respond to 429s as they are sent.
         """
+        if self.test_replay_mode:
+            # when in test replay mode, we want to make the same requests but without any waiting
+            return RateLimitedSession(
+                run_deadline=self._run_deadline,
+                thread_cancellation_token=self._thread_cancellation_token,
+                max_retries=max_retries,
+                backoff_factor=0,
+                statuses_to_include=statuses_to_include,
+                respect_retry_after_header=False
+            )
         return RateLimitedSession(
             run_deadline=self._run_deadline,
             thread_cancellation_token=self._thread_cancellation_token,
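The added test_replay_mode branch changes only the timing of the returned session, not the requests it issues: backoff_factor=0 removes the exponential backoff between attempts, and respect_retry_after_header=False stops the session from sleeping on server-provided Retry-After values, so recorded interactions replay as fast as possible. A rough sketch of the effect, constructed the same way as in the branch above (the deadline, token, and endpoint names here are illustrative, not taken from the package):

    # replay-style session: retries still fire on 429, but with no waiting
    replay_session = RateLimitedSession(
        run_deadline=run_deadline,                        # assumed: a datetime in the future
        thread_cancellation_token=cancellation_token,     # assumed: a threading.Event
        max_retries=5,
        backoff_factor=0,                  # no exponential backoff between attempts
        statuses_to_include=[429],
        respect_retry_after_header=False,  # ignore Retry-After delays from the server
    )
    response = replay_session.get("https://api.example.com/records")  # hypothetical endpoint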
@@ -10,6 +10,7 @@ from email.utils import parsedate_to_datetime
10
10
  from logging import getLogger
11
11
  from typing import Any, List, Literal, Optional, Dict, Tuple
12
12
  import requests
13
+ import time
13
14
  from pydantic import Field, root_validator
14
15
  from pydantic.json import pydantic_encoder
15
16
  from .configuration import SubscriptableBaseModel
@@ -353,16 +354,24 @@ class RetryWithLogging(Retry):
     """
     Adding extra logs before making a retry request
     """
-    def __call__(self, *args: Any, thread_cancellation_token:threading.Event, **kwargs: Any) -> Any:
-        self.thread_cancellation_token = thread_cancellation_token
-        return super().__call__(*args, **kwargs)
+    def __init__(self, *args: Any, **kwargs: Any) -> Any:
+        self.thread_cancellation_token:Optional[threading.Event] = None
+        return super().__init__(*args, **kwargs)
+
+    def new(self, **kw):
+        new_retry = super().new(**kw)
+        new_retry.thread_cancellation_token = self.thread_cancellation_token
+        return new_retry

     def sleep_for_retry(self, response=None):
         retry_after = self.get_retry_after(response)
         if retry_after:
             logger.info(f"Retrying after {retry_after} seconds due to Retry-After header")
-            if self.thread_cancellation_token.wait(retry_after):
-                raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")
+            if self.thread_cancellation_token is None:
+                time.sleep(retry_after)
+            else:
+                if self.thread_cancellation_token.wait(retry_after):
+                    raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")
             return True
         return False

@@ -371,8 +380,11 @@ class RetryWithLogging(Retry):
         if backoff <= 0:
             return
         logger.info(f"Retrying after {backoff} seconds due to backoff time")
-        if self.thread_cancellation_token.wait(backoff):
-            raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")
+        if self.thread_cancellation_token is None:
+            time.sleep(backoff)
+        else:
+            if self.thread_cancellation_token.wait(backoff):
+                raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")


 class RateLimitedSession(requests.Session):
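The switch from passing thread_cancellation_token through __call__ to storing it as an instance attribute leans on urllib3's cloning behaviour: Retry.increment() builds each successive retry state via new(), so a custom attribute that is not copied there would silently revert to None after the first attempt, which is exactly the case the new time.sleep fallback guards against. A minimal standalone sketch of the same pattern, using illustrative names rather than the classes above:

    import threading
    from urllib3.util.retry import Retry

    class TokenCarryingRetry(Retry):
        """Retry subclass that keeps a reference to a cancellation event across clones."""
        def __init__(self, *args, **kwargs):
            self.cancel_event = None  # attached after construction, may remain None
            super().__init__(*args, **kwargs)

        def new(self, **kw):
            # urllib3 clones the Retry object via new() on every attempt; copy the
            # custom attribute onto the clone so later attempts can still see it
            clone = super().new(**kw)
            clone.cancel_event = self.cancel_event
            return clone

    retry = TokenCarryingRetry(total=3, status_forcelist=[429])
    retry.cancel_event = threading.Event()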
@@ -382,7 +394,13 @@ class RateLimitedSession(requests.Session):
     The thread_cancellation_token is observed when waiting, as well as the overall run deadline.
     In case this is used across threads, the retry count will be tracked per request URL (minus query parameters). It will be cleared when the request is successful.
     """
-    def __init__(self, run_deadline:datetime.datetime, thread_cancellation_token:threading.Event, max_retries=5, backoff_factor=1, statuses_to_include:List[int] = [429]):
+    def __init__(self,
+                 run_deadline:datetime.datetime,
+                 thread_cancellation_token:threading.Event,
+                 max_retries=5,
+                 backoff_factor=1,
+                 statuses_to_include:List[int] = [429],
+                 respect_retry_after_header:bool = True):
         super().__init__()
         self.max_retries = max_retries
         self.backoff_factor = backoff_factor
@@ -396,8 +414,10 @@ class RateLimitedSession(requests.Session):
             total=max_retries,
             backoff_factor=backoff_factor,
             status_forcelist=statuses_to_include,
-            allowed_methods=["HEAD", "GET", "OPTIONS", "POST", "PUT", "DELETE"]
+            allowed_methods=["HEAD", "GET", "OPTIONS", "POST", "PUT", "DELETE"],
+            respect_retry_after_header=respect_retry_after_header
         )
+        retry_strategy.thread_cancellation_token = thread_cancellation_token
         adapter = HTTPAdapter(max_retries=retry_strategy)
         self.mount("https://", adapter)
         self.mount("http://", adapter)