py-adtools 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of py-adtools might be problematic.

adtools/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from .py_code import PyCodeBlock, PyFunction, PyClass, PyProgram
- from .evaluator import PyEvaluator
+ from .evaluator import PyEvaluator, PyEvaluatorForBigReturnedObject
  from .evaluator_pool import EvaluatorExecutorPool
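
For reference, the widened re-export above means downstream code can pull the new evaluator straight from the package root in 0.1.9; a minimal sketch of the import this enables:

    # Both names are re-exported by adtools/__init__.py as of 0.1.9
    from adtools import PyEvaluator, PyEvaluatorForBigReturnedObject
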
adtools/evaluator.py CHANGED
@@ -7,16 +7,32 @@ Commercial use of this software or its derivatives requires prior written permis
 
  import multiprocessing
  import os
+ import pickle
  import sys
  import time
  from abc import ABC, abstractmethod
+ from multiprocessing import shared_memory
  from queue import Empty
  from typing import Any, Literal, Dict, Callable, List, Tuple
+ import multiprocessing.managers
  import psutil
  import traceback
 
  from .py_code import PyProgram
 
+ __all__ = ['PyEvaluator', 'PyEvaluatorForBigReturnedObject']
+
+
+ def _set_mp_start_method(multiprocessing_start_method: Literal['default', 'auto', 'fork', 'spawn']):
+     if multiprocessing_start_method == 'auto':
+         # Force macOS and Linux use 'fork' to generate new process
+         if sys.platform.startswith('darwin') or sys.platform.startswith('linux'):
+             multiprocessing.set_start_method('fork', force=True)
+     elif multiprocessing_start_method == 'fork':
+         multiprocessing.set_start_method('fork', force=True)
+     elif multiprocessing_start_method == 'spawn':
+         multiprocessing.set_start_method('spawn', force=True)
+
 
  class PyEvaluator(ABC):
 
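
The hunk above also introduces the module-level helper _set_mp_start_method, which later hunks call in place of the previously inlined branching. Read from the branches shown above, the mapping it encodes is roughly the following (a sketch for orientation, not code from the package):

    # 'auto'    -> force 'fork' on macOS/Linux, leave every other platform unchanged
    # 'fork'    -> multiprocessing.set_start_method('fork', force=True)
    # 'spawn'   -> multiprocessing.set_start_method('spawn', force=True)
    # 'default' -> falls through every branch, so the interpreter default is kept
    _set_mp_start_method('auto')  # as _evaluate_in_safe_process and the new secure_evaluate now do
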
@@ -99,43 +115,38 @@ class PyEvaluator(ABC):
              program: the program to be evaluated.
              **kwargs: additional keyword arguments to pass to 'evaluate_program'.
          """
-         try:
-             # Parse to program instance
-             if isinstance(program, str):
-                 program = PyProgram.from_text(program)
-             function_names = [f.name for f in program.functions]
-             class_names = [c.name for c in program.classes]
-
-             # Execute the code and get callable instances
-             if self.exec_code:
-                 all_globals_namespace = {}
-                 # Execute the program, map func/var/class to global namespace
-                 exec(str(program), all_globals_namespace)
-                 # Get callable functions
-                 callable_funcs_list = [all_globals_namespace[f_name] for f_name in function_names]
-                 callable_funcs_dict = dict(zip(function_names, callable_funcs_list))
-                 # Get callable classes
-                 callable_cls_list = [all_globals_namespace[c_name] for c_name in class_names]
-                 callable_cls_dict = dict(zip(class_names, callable_cls_list))
-             else:
-                 callable_funcs_list, callable_funcs_dict, callable_cls_list, callable_cls_dict = (
-                     None, None, None, None
-                 )
-
-             # Get evaluate result
-             res = self.evaluate_program(
-                 str(program),
-                 callable_funcs_dict,
-                 callable_funcs_list,
-                 callable_cls_dict,
-                 callable_cls_list,
-                 **kwargs
+         # Parse to program instance
+         if isinstance(program, str):
+             program = PyProgram.from_text(program)
+         function_names = [f.name for f in program.functions]
+         class_names = [c.name for c in program.classes]
+
+         # Execute the code and get callable instances
+         if self.exec_code:
+             all_globals_namespace = {}
+             # Execute the program, map func/var/class to global namespace
+             exec(str(program), all_globals_namespace)
+             # Get callable functions
+             callable_funcs_list = [all_globals_namespace[f_name] for f_name in function_names]
+             callable_funcs_dict = dict(zip(function_names, callable_funcs_list))
+             # Get callable classes
+             callable_cls_list = [all_globals_namespace[c_name] for c_name in class_names]
+             callable_cls_dict = dict(zip(class_names, callable_cls_list))
+         else:
+             callable_funcs_list, callable_funcs_dict, callable_cls_list, callable_cls_dict = (
+                 None, None, None, None
              )
-             return res
-         except Exception as e:
-             if self.debug_mode:
-                 print(traceback.format_exc())
-             return None
+
+         # Get evaluate result
+         res = self.evaluate_program(
+             str(program),
+             callable_funcs_dict,
+             callable_funcs_list,
+             callable_cls_dict,
+             callable_cls_list,
+             **kwargs
+         )
+         return res
 
      def _evaluate_in_safe_process(
          self,
@@ -177,14 +188,7 @@ class PyEvaluator(ABC):
              Returns the evaluation results. If the 'get_evaluate_time' is True,
              the return value will be (Results, Time).
          """
-         if multiprocessing_start_method == 'auto':
-             # Force macOS and Linux use 'fork' to generate new process
-             if sys.platform.startswith('darwin') or sys.platform.startswith('linux'):
-                 multiprocessing.set_start_method('fork', force=True)
-         elif multiprocessing_start_method == 'fork':
-             multiprocessing.set_start_method('fork', force=True)
-         elif multiprocessing_start_method == 'spawn':
-             multiprocessing.set_start_method('spawn', force=True)
+         _set_mp_start_method(multiprocessing_start_method)
 
          try:
              # Start evaluation process
@@ -203,14 +207,14 @@ class PyEvaluator(ABC):
                      result = result_queue.get(timeout=timeout_seconds)
                      # Calculate the evaluate time
                      eval_time = time.time() - evaluate_start_time
-                     # After getting the result, terminate/kill the process
+                     # After getting the result, terminate and kill the process
                      self._kill_process_and_its_children(process)
                  except Empty: # The queue is empty indicates a timeout
                      # Calculate the evaluate time
                      eval_time = time.time() - evaluate_start_time
                      if self.debug_mode:
                          print(f'DEBUG: the evaluation time exceeds {timeout_seconds}s.')
-                     # Terminate/kill all processes if timeout happens
+                     # Terminate and kill all processes if timeout happens
                      self._kill_process_and_its_children(process)
                      result = None
                  except Exception as e:
@@ -218,7 +222,7 @@ class PyEvaluator(ABC):
                      eval_time = time.time() - evaluate_start_time
                      if self.debug_mode:
                          print(f'DEBUG: evaluation failed with exception:\n{traceback.format_exc()}')
-                     # Terminate/kill all processes if meet exceptions
+                     # Terminate and kill all processes if meet exceptions
                      self._kill_process_and_its_children(process)
                      result = None
              else:
@@ -226,11 +230,346 @@ class PyEvaluator(ABC):
                  result = result_queue.get()
                  # Calculate the evaluate time
                  eval_time = time.time() - evaluate_start_time
-                 # Terminate/kill all processes after evaluation
+                 # Terminate and kill all processes after evaluation
                  self._kill_process_and_its_children(process)
 
              return (result, eval_time) if get_evaluate_time else result
          except Exception as e:
              if self.debug_mode:
-                 print(traceback.format_exc())
+                 traceback.print_exc()
              return None
+
+
+ class PyEvaluatorForBigReturnedObject(PyEvaluator):
+     def __init__(
+         self,
+         exec_code: bool = True,
+         find_and_kill_children_evaluation_process: bool = False,
+         debug_mode: bool = False,
+         *,
+         join_timeout_seconds: int = 10
+     ):
+         """Evaluator interface for evaluating the Python algorithm program. Override this class and implement
+         'evaluate_program' method, then invoke 'self.evaluate()' or 'self.secure_evaluate()' for evaluation.
+
+         **Note:** This class supports the secure_evaluate to handle very big return object, e.g., Tensors.
+
+         Args:
+             exec_code: Using 'exec()' to execute the program code and obtain the callable functions and classes,
+                 which will be passed to 'self.evaluate_program()'. Set this parameter to 'False' if you are going to
+                 evaluate a Python scripy. Note that if the parameter is set to 'False', the arguments 'callable_...'
+                 in 'self.evaluate_program()' will no longer be affective.
+             find_and_kill_children_evaluation_process: If using 'self.secure_evaluate', kill children processes
+                 when they are terminated. Note that it is suggested to set to 'False' if the evaluation process
+                 does not start new processes.
+             debug_mode: Debug mode.
+             join_timeout_seconds: Timeout in seconds to wait for the process to finish. Kill the process if timeout.
+         """
+         super().__init__(
+             exec_code,
+             find_and_kill_children_evaluation_process,
+             debug_mode,
+             join_timeout_seconds=join_timeout_seconds
+         )
+
+     @abstractmethod
+     def evaluate_program(
+         self,
+         program_str: str,
+         callable_functions_dict: Dict[str, Callable] | None,
+         callable_functions_list: List[Callable] | None,
+         callable_classes_dict: Dict[str, Callable] | None,
+         callable_classes_list: List[Callable] | None,
+         **kwargs
+     ) -> Any:
+         raise NotImplementedError(
+             'Must provide an evaluator for a python program. '
+             'Override this method in a subclass.'
+         )
+
+     def _evaluate_and_put_res_in_manager_dict(
+         self,
+         program_str: str,
+         result_dict: multiprocessing.managers.DictProxy,
+         signal_queue: multiprocessing.Queue,
+         redirect_to_devnull: bool,
+         **kwargs
+     ):
+         """Evaluate and store result in Manager().dict() (for large results)."""
+         if redirect_to_devnull:
+             with open(os.devnull, 'w') as devnull:
+                 os.dup2(devnull.fileno(), sys.stdout.fileno())
+                 os.dup2(devnull.fileno(), sys.stderr.fileno())
+         try:
+             # Evaluate and get results
+             res = self.evaluate(program_str, **kwargs)
+             # Write results into dict
+             result_dict['result'] = res
+             # Put a signal to queue to inform the parent process the evaluation has done
+             signal_queue.put(('ok', None))
+         except Exception as e:
+             if self.debug_mode:
+                 traceback.print_exc()
+             # Write results into dict
+             result_dict['result'] = None
+             # Put a signal to queue to inform the parent process the evaluation has terminated
+             signal_queue.put(('error', str(e)))
+
+     def secure_evaluate(
+         self,
+         program: str | PyProgram,
+         timeout_seconds: int | float = None,
+         redirect_to_devnull: bool = False,
+         multiprocessing_start_method: Literal['default', 'auto', 'fork', 'spawn'] = 'auto',
+         get_evaluate_time: bool = False,
+         **kwargs
+     ):
+         """Evaluate program in a new process. This enables timeout restriction and output redirection.
+         Args:
+             program: the program to be evaluated.
+             timeout_seconds: return 'None' if the execution time exceeds 'timeout_seconds'.
+             redirect_to_devnull: redirect any output to '/dev/null'.
+             multiprocessing_start_method: start a process using 'fork' or 'spawn'. If set to 'auto',
+                 the process will be started using 'fork' with Linux/macOS and 'spawn' with Windows.
+                 If set to 'default', there will be no changes to system default.
+             get_evaluate_time: get evaluation time for this program.
+             **kwargs: additional keyword arguments to pass to 'evaluate_program'.
+         Returns:
+             Returns the evaluation results. If the 'get_evaluate_time' is True,
+             the return value will be (Results, Time).
+         """
+         _set_mp_start_method(multiprocessing_start_method)
+
+         with multiprocessing.Manager() as manager:
+             # Path a dictionary to the evaluation process to get maybe very big return objects
+             result_dict = manager.dict()
+             # Pass a queue to the evaluation process to get signals whether the evaluation terminates
+             signal_queue = multiprocessing.Queue()
+             # Start evaluation process
+             process = multiprocessing.Process(
+                 target=self._evaluate_and_put_res_in_manager_dict,
+                 args=(str(program), result_dict, signal_queue, redirect_to_devnull),
+                 kwargs=kwargs,
+             )
+             evaluate_start_time = time.time()
+             process.start()
+
+             try:
+                 if timeout_seconds is not None:
+                     try:
+                         # If there is timeout restriction, we try to get results before timeout
+                         signal = signal_queue.get(timeout=timeout_seconds)
+                     except Empty:
+                         # Evaluation timeout happens, we return 'None' as well as the actual evaluate time
+                         eval_time = time.time() - evaluate_start_time
+                         if self.debug_mode:
+                             print(f'DEBUG: evaluation time exceeds {timeout_seconds}s.')
+                         # Terminate and kill all processes after evaluation
+                         self._kill_process_and_its_children(process)
+                         return (None, eval_time) if get_evaluate_time else None
+                 else:
+                     # If there is no timeout restriction, we wait until the evaluation terminates
+                     signal = signal_queue.get()
+
+                 # Calculate evaluation time and kill children processes
+                 eval_time = time.time() - evaluate_start_time
+                 # Terminate and kill all processes after evaluation
+                 self._kill_process_and_its_children(process)
+
+                 # The first element is 'ok' indicates that the evaluation terminate without exceptions
+                 if signal[0] == 'ok':
+                     # We get the evaluation results from 'manager.dict'
+                     result = result_dict.get('result', None)
+                 else:
+                     # The evaluation failed for some reason, so we set the result to 'None'
+                     if self.debug_mode:
+                         print(f'DEBUG: child process error: {signal[1]}')
+                     result = None
+             except:
+                 # If there is any exception during above procedure, we set the result to None
+                 eval_time = time.time() - evaluate_start_time
+                 if self.debug_mode:
+                     print(f'DEBUG: exception in manager evaluate:\n{traceback.format_exc()}')
+                 # Terminate and kill all processes after evaluation
+                 self._kill_process_and_its_children(process)
+                 result = None
+
+             return (result, eval_time) if get_evaluate_time else result
+
+
+ class PyEvaluatorForBigReturnedObjectV2(PyEvaluator):
+
+     def __init__(
+         self,
+         exec_code: bool = True,
+         find_and_kill_children_evaluation_process: bool = False,
+         debug_mode: bool = False,
+         *,
+         join_timeout_seconds: int = 10
+     ):
+         """Evaluator interface for evaluating the Python algorithm program. Override this class and implement
+         'evaluate_program' method, then invoke 'self.evaluate()' or 'self.secure_evaluate()' for evaluation.
+         Note: This class supports the secure_evaluate to handle very big return object, e.g., Tensors.
+
+         Args:
+             exec_code: Using 'exec()' to execute the program code and obtain the callable functions and classes,
+                 which will be passed to 'self.evaluate_program()'. Set this parameter to 'False' if you are going to
+                 evaluate a Python scripy. Note that if the parameter is set to 'False', the arguments 'callable_...'
+                 in 'self.evaluate_program()' will no longer be affective.
+             find_and_kill_children_evaluation_process: If using 'self.secure_evaluate', kill children processes
+                 when they are terminated. Note that it is suggested to set to 'False' if the evaluation process
+                 does not start new processes.
+             debug_mode: Debug mode.
+             join_timeout_seconds: Timeout in seconds to wait for the process to finish. Kill the process if timeout.
+         """
+         super().__init__(
+             exec_code,
+             find_and_kill_children_evaluation_process,
+             debug_mode,
+             join_timeout_seconds=join_timeout_seconds
+         )
+
+     @abstractmethod
+     def evaluate_program(
+         self,
+         program_str: str,
+         callable_functions_dict: Dict[str, Callable] | None,
+         callable_functions_list: List[Callable] | None,
+         callable_classes_dict: Dict[str, Callable] | None,
+         callable_classes_list: List[Callable] | None,
+         **kwargs
+     ) -> Any:
+         """Evaluate a given program.
+         Args:
+             program_str: The raw program text.
+             callable_functions_dict: A dict maps function name to callable function.
+             callable_functions_list: A list of callable functions.
+             callable_classes_dict: A dict maps class name to callable class.
+             callable_classes_list: A list of callable classes.
+         Returns:
+             Returns the evaluation result.
+         """
+         raise NotImplementedError(
+             'Must provide an evaluator for a python program. '
+             'Override this method in a subclass.'
+         )
+
+     def _evaluate_and_put_res_in_shared_memory(
+         self,
+         program_str: str,
+         meta_queue: multiprocessing.Queue,
+         redirect_to_devnull: bool,
+         **kwargs
+     ):
+         """Evaluate and store result in shared memory (for large results)."""
+         # Redirect STDOUT and STDERR to '/dev/null'
+         if redirect_to_devnull:
+             with open(os.devnull, 'w') as devnull:
+                 os.dup2(devnull.fileno(), sys.stdout.fileno())
+                 os.dup2(devnull.fileno(), sys.stderr.fileno())
+
+         # Evaluate and get results
+         res = self.evaluate(program_str, **kwargs)
+
+         try:
+             # Dump the results to data
+             data = pickle.dumps(res, protocol=pickle.HIGHEST_PROTOCOL)
+             # Create shared memory with the size of data
+             shm = shared_memory.SharedMemory(create=True, size=len(data))
+             # Write data
+             shm.buf[:len(data)] = data
+             # Send back shm metadata (shared_mem_name, shared_mem_size) and put them into the queue
+             meta_queue.put((shm.name, len(data)))
+             # Child closes its handle
+             shm.close()
+         except Exception as data_pickle_error:
+             # Put the exception message to the queue
+             meta_queue.put((None, str(data_pickle_error)))
+
+     def secure_evaluate(
+         self,
+         program: str | PyProgram,
+         timeout_seconds: int | float = None,
+         redirect_to_devnull: bool = False,
+         multiprocessing_start_method: str = 'auto',
+         get_evaluate_time: bool = False,
+         **kwargs
+     ):
+         """Evaluate program in a new process. This enables timeout restriction and output redirection.
+         Args:
+             program: the program to be evaluated.
+             timeout_seconds: return 'None' if the execution time exceeds 'timeout_seconds'.
+             redirect_to_devnull: redirect any output to '/dev/null'.
+             multiprocessing_start_method: start a process using 'fork' or 'spawn'. If set to 'auto',
+                 the process will be started using 'fork' with Linux/macOS and 'spawn' with Windows.
+                 If set to 'default', there will be no changes to system default.
+             get_evaluate_time: get evaluation time for this program.
+             **kwargs: additional keyword arguments to pass to 'evaluate_program'.
+         Returns:
+             Returns the evaluation results. If the 'get_evaluate_time' is True,
+             the return value will be (Results, Time).
+         """
+         if multiprocessing_start_method == 'auto':
+             if sys.platform.startswith('darwin') or sys.platform.startswith('linux'):
+                 multiprocessing.set_start_method('fork', force=True)
+         elif multiprocessing_start_method == 'fork':
+             multiprocessing.set_start_method('fork', force=True)
+         elif multiprocessing_start_method == 'spawn':
+             multiprocessing.set_start_method('spawn', force=True)
+
+         meta_queue = multiprocessing.Queue()
+
+         process = multiprocessing.Process(
+             target=self._evaluate_and_put_res_in_shared_memory,
+             args=(str(program), meta_queue, redirect_to_devnull),
+             kwargs=kwargs,
+         )
+
+         evaluate_start_time = time.time()
+         process.start()
+
+         try:
+             if timeout_seconds is not None:
+                 try:
+                     # Try to get the metadata before timeout
+                     meta = meta_queue.get(timeout=timeout_seconds)
+                 except Empty:
+                     # Evaluate timeout
+                     eval_time = time.time() - evaluate_start_time
+                     if self.debug_mode:
+                         print(f'DEBUG: evaluation time exceeds {timeout_seconds}s.')
+                     self._kill_process_and_its_children(process)
+                     return (None, eval_time) if get_evaluate_time else None
+             else:
+                 meta = meta_queue.get()
+
+             # Calculate evaluation time
+             eval_time = time.time() - evaluate_start_time
+             self._kill_process_and_its_children(process)
+
+             # If the first element in the queue is None,
+             # it means that the shared memory raises exceptions
+             if meta[0] is None:
+                 if self.debug_mode:
+                     print(f'DEBUG: shared memory failed with exception: {meta[1]}')
+                 result = None
+             else:
+                 # Read results from metadata
+                 shm_name, size = meta
+                 shm = shared_memory.SharedMemory(name=shm_name)
+                 buf = bytes(shm.buf[:size])
+                 # Load results from buffer
+                 result = pickle.loads(buf)
+                 shm.close()
+                 try:
+                     shm.unlink()
+                 except FileNotFoundError:
+                     pass
+         except Exception:
+             eval_time = time.time() - evaluate_start_time
+             if self.debug_mode:
+                 print(f'DEBUG: exception in shared evaluate:\n{traceback.format_exc()}')
+             self._kill_process_and_its_children(process)
+             result = None
+
+         return (result, eval_time) if get_evaluate_time else result
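
Taken together, the additions keep the existing PyEvaluator workflow but change how the child process hands results back: PyEvaluatorForBigReturnedObject routes them through a Manager().dict() plus a signal queue, while the V2 variant pickles them into a multiprocessing.shared_memory block and sends only the block's name and size through the queue. Below is a minimal usage sketch against the interface shown in this diff; the subclass, program text, and scoring logic are hypothetical and not part of the package:

    from adtools import PyEvaluatorForBigReturnedObject

    class ListEvaluator(PyEvaluatorForBigReturnedObject):
        # Hypothetical scorer: call the first parsed function and return its (large) output.
        def evaluate_program(self, program_str, callable_functions_dict,
                             callable_functions_list, callable_classes_dict,
                             callable_classes_list, **kwargs):
            return callable_functions_list[0]()

    # Hypothetical program text; stands in for LLM-generated code with a big return value.
    PROGRAM = "def build():\n    return list(range(1_000_000))\n"

    if __name__ == '__main__':
        evaluator = ListEvaluator(debug_mode=True)
        # Runs in a child process; the large result travels back via Manager().dict(),
        # and None comes back if the child exceeds the timeout.
        result, seconds = evaluator.secure_evaluate(
            PROGRAM, timeout_seconds=30, get_evaluate_time=True
        )

The V2 class exposes the same secure_evaluate signature, so the sketch would read the same with PyEvaluatorForBigReturnedObjectV2 and the result returning through shared memory instead; note that, as of this diff, only the Manager-based class is re-exported in adtools/__init__.py and listed in __all__.
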
py_adtools-0.1.8.dist-info/METADATA → py_adtools-0.1.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: py-adtools
- Version: 0.1.8
+ Version: 0.1.9
  Summary: Useful tools for parsing and evaluating Python programs for LLM-based algorithm design.
  Home-page: https://github.com/RayZhhh/py-adtools
  Author: Rui Zhang
@@ -1,13 +1,13 @@
1
- adtools/__init__.py,sha256=kbxntZFeCcURiIypNOdMWyeKPdlzRsWOB-K7z6HNCsc,150
2
- adtools/evaluator.py,sha256=A33E0HmLsIMtMbdjRmfPzck9gDodJc96gvXR0cL_SR0,10577
1
+ adtools/__init__.py,sha256=2XOq8zj8mpejJFavtARmst_hih6qAtBPKBoNB0UWeqs,183
2
+ adtools/evaluator.py,sha256=v-tdE5emOa7o6615YN2Av7fuh5W5hIlHoSjc-rTO1uU,26334
3
3
  adtools/evaluator_pool.py,sha256=zfQ7DgAjpByqPM5SE1tRQT_HGXU5uwNntvguzrXaPEk,3258
4
4
  adtools/py_code.py,sha256=FZfkp-IZ4zpOjrWe6svKNJsQhVANaTTkE0l0mc4aMW8,14277
5
5
  adtools/lm/__init__.py,sha256=PZf5Lraly9rAWz-cxOSLCvZ9OZ4EI8aQEluetvNX8LM,146
6
6
  adtools/lm/lm_base.py,sha256=KtO7KTrrMW7oWN-BhncoIOsbOVQsSc-0gNCYtvR6Sog,1105
7
7
  adtools/lm/openai_api.py,sha256=LcfLkNOBrJTdsp0zcUjaCelIcQK5XknpHWrlB0S67_k,2390
8
8
  adtools/lm/vllm_server.py,sha256=BPZoTS77wNJDcJ_0FO2QFyZTf6WR0isYKMuTctqKEU8,12942
9
- py_adtools-0.1.8.dist-info/licenses/LICENSE,sha256=E5GGyecx3y5h2gcEGQloF-rDY9wbaef5IHjRsvtFbt8,1065
10
- py_adtools-0.1.8.dist-info/METADATA,sha256=22hW8kcx1OxxlcedTDYIX1EzfB79pRYEtU3cXptveE8,6386
11
- py_adtools-0.1.8.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
12
- py_adtools-0.1.8.dist-info/top_level.txt,sha256=X2kKzmJFDAKR2FWCij5pfMG9pVVjVUomyl4e-1VLXIk,8
13
- py_adtools-0.1.8.dist-info/RECORD,,
9
+ py_adtools-0.1.9.dist-info/licenses/LICENSE,sha256=E5GGyecx3y5h2gcEGQloF-rDY9wbaef5IHjRsvtFbt8,1065
10
+ py_adtools-0.1.9.dist-info/METADATA,sha256=Fnu9fmNWBgI8l4nuvrIwYHXGUu4vweTH8b_uziAB4sM,6386
11
+ py_adtools-0.1.9.dist-info/WHEEL,sha256=lTU6B6eIfYoiQJTZNc-fyaR6BpL6ehTzU3xGYxn2n8k,91
12
+ py_adtools-0.1.9.dist-info/top_level.txt,sha256=X2kKzmJFDAKR2FWCij5pfMG9pVVjVUomyl4e-1VLXIk,8
13
+ py_adtools-0.1.9.dist-info/RECORD,,