omnata-plugin-runtime 0.4.5a100__tar.gz → 0.4.6a102__tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omnata-plugin-runtime
-Version: 0.4.5a100
+Version: 0.4.6a102
 Summary: Classes and common runtime components for building and running Omnata Plugins
 Author: James Weakley
 Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "omnata-plugin-runtime"
-version = "0.4.5-a100"
+version = "0.4.6-a102"
 description = "Classes and common runtime components for building and running Omnata Plugins"
 authors = ["James Weakley <james.weakley@omnata.com>"]
 readme = "README.md"
@@ -21,6 +21,7 @@ import queue
 import threading
 import time
 import hashlib
+import requests
 from abc import ABC, abstractmethod
 from decimal import Decimal
 from functools import partial, wraps, reduce
@@ -263,6 +264,16 @@ class SyncRequest(ABC):
         Takes into account the run deadline and cancellation status.
         This is an alternative which can be used when the target API does not publish specific rate limits, and instead just asks you to respond to 429s as they are sent.
         """
+        if self.test_replay_mode:
+            # when in test replay mode, we want to make the same requests but without any waiting
+            return RateLimitedSession(
+                run_deadline=self._run_deadline,
+                thread_cancellation_token=self._thread_cancellation_token,
+                max_retries=max_retries,
+                backoff_factor=0,
+                statuses_to_include=statuses_to_include,
+                respect_retry_after_header=False
+            )
         return RateLimitedSession(
             run_deadline=self._run_deadline,
             thread_cancellation_token=self._thread_cancellation_token,
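Note: the sketch below is illustrative only and is not part of the diff. It restates the two configurations this branch produces: a normal run that backs off between retries, and a test-replay run that retries immediately and ignores Retry-After headers. The deadline and cancellation-token values are placeholder assumptions; only the constructor parameters shown in the hunks are taken from the package.

# --- illustrative sketch (not part of the diff) ---
import datetime
import threading

deadline = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=4)  # assumed run deadline
cancel = threading.Event()  # assumed cancellation token

# Normal run: back off between retries and honour Retry-After headers (defaults).
live_session = RateLimitedSession(
    run_deadline=deadline,
    thread_cancellation_token=cancel,
    max_retries=5,
    statuses_to_include=[429],
)

# Test replay mode: make the same requests, but with no waiting between retries.
replay_session = RateLimitedSession(
    run_deadline=deadline,
    thread_cancellation_token=cancel,
    max_retries=5,
    backoff_factor=0,
    statuses_to_include=[429],
    respect_retry_after_header=False,
)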
@@ -8,8 +8,9 @@ import re
 import threading
 from email.utils import parsedate_to_datetime
 from logging import getLogger
-from typing import List, Literal, Optional, Dict, Tuple
+from typing import Any, List, Literal, Optional, Dict, Tuple
 import requests
+import time
 from pydantic import Field, root_validator
 from pydantic.json import pydantic_encoder
 from .configuration import SubscriptableBaseModel
@@ -349,6 +350,38 @@ class RetryLaterException(Exception):
         self.message = message
         super().__init__(self.message)
 
+class RetryWithLogging(Retry):
+    """
+    Adding extra logs before making a retry request
+    """
+    def __init__(self, *args: Any, thread_cancellation_token:threading.Event, **kwargs: Any) -> Any:
+        self.thread_cancellation_token = thread_cancellation_token
+        return super().__init__(*args, **kwargs)
+
+    def sleep_for_retry(self, response=None):
+        retry_after = self.get_retry_after(response)
+        if retry_after:
+            logger.info(f"Retrying after {retry_after} seconds due to Retry-After header")
+            if self.thread_cancellation_token is None:
+                time.sleep(retry_after)
+            else:
+                if self.thread_cancellation_token.wait(retry_after):
+                    raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")
+            return True
+        return False
+
+    def _sleep_backoff(self):
+        backoff = self.get_backoff_time()
+        if backoff <= 0:
+            return
+        logger.info(f"Retrying after {backoff} seconds due to backoff time")
+        if self.thread_cancellation_token is None:
+            time.sleep(backoff)
+        else:
+            if self.thread_cancellation_token.wait(backoff):
+                raise InterruptedWhileWaitingException(message="The sync was interrupted while waiting for rate limiting to expire")
+
+
 class RateLimitedSession(requests.Session):
     """
     Creates a requests session that will automatically handle rate limiting.
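Note: both overrides added above rely on the semantics of threading.Event.wait(timeout): it blocks for up to timeout seconds and returns True only if the event was set, which is what turns a cancellation into an InterruptedWhileWaitingException rather than a silent sleep. The standalone sketch below is not part of the diff; the function name and the RuntimeError stand-in are illustrative assumptions.

# --- illustrative sketch (not part of the diff) ---
import threading
import time

def wait_or_cancel(token: threading.Event, seconds: float) -> None:
    """Sleep for `seconds`, but abort early if `token` is set."""
    if token is None:
        # No token supplied: fall back to an uninterruptible sleep.
        time.sleep(seconds)
    elif token.wait(seconds):
        # wait() returned True -> the event was set before the timeout expired.
        raise RuntimeError("interrupted while waiting")  # stand-in for InterruptedWhileWaitingException

cancel = threading.Event()
# wait_or_cancel(cancel, 2.0)                 # sleeps roughly 2 seconds
# cancel.set(); wait_or_cancel(cancel, 2.0)   # raises immediately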
@@ -356,7 +389,13 @@ class RateLimitedSession(requests.Session):
     The thread_cancellation_token is observed when waiting, as well as the overall run deadline.
     In case this is used across threads, the retry count will be tracked per request URL (minus query parameters). It will be cleared when the request is successful.
     """
-    def __init__(self, run_deadline:datetime.datetime, thread_cancellation_token:threading.Event, max_retries=5, backoff_factor=1, statuses_to_include:List[int] = [429]):
+    def __init__(self,
+                 run_deadline:datetime.datetime,
+                 thread_cancellation_token:threading.Event,
+                 max_retries=5,
+                 backoff_factor=1,
+                 statuses_to_include:List[int] = [429],
+                 respect_retry_after_header:bool = True):
         super().__init__()
         self.max_retries = max_retries
         self.backoff_factor = backoff_factor
@@ -366,11 +405,13 @@ class RateLimitedSession(requests.Session):
         self.thread_cancellation_token = thread_cancellation_token
         self.statuses_to_include = statuses_to_include
 
-        retry_strategy = Retry(
+        retry_strategy = RetryWithLogging(
+            thread_cancellation_token=thread_cancellation_token,
             total=max_retries,
             backoff_factor=backoff_factor,
             status_forcelist=statuses_to_include,
-            allowed_methods=["HEAD", "GET", "OPTIONS", "POST", "PUT", "DELETE"]
+            allowed_methods=["HEAD", "GET", "OPTIONS", "POST", "PUT", "DELETE"],
+            respect_retry_after_header=respect_retry_after_header
         )
         adapter = HTTPAdapter(max_retries=retry_strategy)
         self.mount("https://", adapter)
@@ -398,7 +439,7 @@ class RateLimitedSession(requests.Session):
     def request(self, method, url, **kwargs):
         while True:
             response = super().request(method, url, **kwargs)
-
+            # TODO: this is probably all redundant as the Retry object should handle this at a lower level (urllib3)
             if response.status_code in self.statuses_to_include:
                 if 'Retry-After' in response.headers:
                     retry_after = response.headers['Retry-After']
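Note: the context lines above, together with the parsedate_to_datetime import earlier in the file, suggest the request loop interprets Retry-After either as a number of seconds or as an HTTP-date, which is what the header allows. The sketch below is a hedged illustration of that interpretation only; it is not the package's code, and the function name is an assumption.

# --- illustrative sketch (not part of the diff) ---
import datetime
from email.utils import parsedate_to_datetime

def retry_after_seconds(value: str) -> float:
    """Convert a Retry-After header value into a wait in seconds."""
    if value.isdigit():
        # Delta-seconds form, e.g. "Retry-After: 120"
        return float(value)
    # HTTP-date form, e.g. "Retry-After: Wed, 21 Oct 2015 07:28:00 GMT"
    when = parsedate_to_datetime(value)
    return max(0.0, (when - datetime.datetime.now(datetime.timezone.utc)).total_seconds())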