brynq-sdk-task-scheduler 4.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ from .task_scheduler import TaskScheduler
@@ -0,0 +1,431 @@
1
+ import sys
2
+ import os
3
+ import datetime
4
+ import inspect
5
+ import time
6
+ from typing import Literal, List, Optional
7
+ import traceback
8
+ import pandas as pd
9
+ import json
10
+ import pymysql
11
+ import requests
12
+ from brynq_sdk_mandrill import MailClient
13
+ from brynq_sdk_functions import Functions
14
+ from brynq_sdk_mysql import MySQL
15
+ from brynq_sdk_brynq import BrynQ
16
+ import warnings
17
+ import re
18
+
19
+
20
+ class TaskScheduler:
21
+
22
    def __init__(self, data_interface_id: int = None, loglevel: str = 'INFO', email_after_errors: bool = False):
        """
        The TaskScheduler is responsible for the logging to the database. Based on this logging, the next
        reload will start or not and a warning will be given or not.
        :param data_interface_id: The ID from the task as saved in the task_scheduler table in the customer
            database. Only used for local runs; on the platform it is taken from sys.argv.
        :param loglevel: Level at which log lines are stored. Default is INFO, which means a line written
            with level DEBUG is not stored. One of DEBUG, INFO, ERROR, CRITICAL (see write_execution_log).
        :param email_after_errors: When True, an email with the number of errors is sent to the customer's
            contact person(s) (task variable 'email_errors_to') at the end of the run.
        """
        # If the task is started via the task_scheduler, customer_db/task_id/run_id/data_interface_id are
        # passed on the command line by the scheduler. A run is considered "local" when the script path
        # contains a user/home directory component; local runs get a timestamp-based run_id and skip
        # MySQL logging entirely.
        # NOTE(review): this heuristic inspects sys.argv[0] path components ('opt', 'home', 'Users') —
        # confirm it matches how the scheduler launches tasks in every environment.
        if any(flag in sys.argv[0].split('/') for flag in ['opt', 'home', 'Users']):
            self.started_local = True
            self.run_id = int(round(time.time() * 100000))  # pseudo-unique run id for local runs
            self.task_id = None
            self.data_interface_id = data_interface_id
            # Expose the interface id to source/target system SDKs via the environment
            os.environ['DATA_INTERFACE_ID'] = str(self.data_interface_id)
            self.mysql_enabled = False
            print("You're running locally, so your task won't be started in the platform and logs won't be saved")
        else:
            self.started_local = False
            # Positional arguments supplied by the platform scheduler
            self.customer_db, self.task_id, self.run_id, self.data_interface_id = sys.argv[1:5]
            self.mysql_enabled = True

        print(f"Run ID: {self.run_id}, Data Interface ID: {self.data_interface_id}, Task ID: {self.task_id}, Started Locally: {self.started_local}")
        self.brynq = BrynQ()
        self.email_after_errors = email_after_errors
        # Directory for the local JSON fallback logs, used when MySQL is unreachable
        self.local_log_dir = 'local_logs'
        os.makedirs(self.local_log_dir, exist_ok=True)

        try:
            self.customer_db = os.getenv("MYSQL_DATABASE")
            self.customer = os.getenv('BRYNQ_SUBDOMAIN').lower().replace(' ', '_')
            self.partner_id = os.getenv('PARTNER_ID').lower().replace(' ', '_') if os.getenv('PARTNER_ID') else 'brynq'
            self.loglevel = loglevel
            self.started_at = datetime.datetime.now()
            self.error_count = 0

            # MySQL warns when a table already exists; we only care about exceptions, so silence warnings.
            warnings.filterwarnings('ignore')

            # Initialize MySQL. Connection credentials are always overridden from the environment because
            # the MySQL() instance might have picked them up from an interface credential instead.
            self.mysql_reachable = True
            try:
                self.mysql = MySQL()
                self.mysql.host = os.getenv("MYSQL_HOST")
                self.mysql.user = os.getenv("MYSQL_USER")
                self.mysql.password = os.getenv("MYSQL_PASSWORD")
                self.mysql.database = os.getenv("MYSQL_DATABASE")
                self.mysql.port = 3306 if os.getenv("MYSQL_PORT") is None else int(os.getenv("MYSQL_PORT"))
                self.mysql.ping()
            except Exception as e:
                # Fall back to local file logging; write_execution_log / _save_log_locally read this flag.
                self.mysql_reachable = False
                self.mysql = None
                print("MySQL is not reachable, logs will be saved locally if needed.")

            # Register the run in the database: replay any buffered local logs, then mark the task RUNNING.
            if self.mysql_enabled and self.mysql_reachable:
                self.customer_id = self.mysql.raw_query(f'SELECT id FROM sc.customers WHERE dbname = \'{self.customer_db}\'')[0][0]
                self._process_local_mysql_logs()
                self._start_task()
        except Exception as e:
            self.error_handling(e)
92
+ def __count_keys(self, json_obj):
93
+ if not isinstance(json_obj, dict):
94
+ return 0
95
+ key_count = 0
96
+ for key, value in json_obj.items():
97
+ if not isinstance(value, dict):
98
+ key_count += 1 # Count the current key
99
+ else:
100
+ key_count += self.__count_keys(value) # Recursively count keys in nested dictionaries
101
+ return key_count
102
+
103
    def __get_caller_info(self):
        """
        Return (file_name, line_number, function_name) of the code that called the public
        logging method which invoked this helper.
        Frame index 2 is relied upon: index 0 is this method, index 1 is the logging method
        (e.g. write_execution_log or error_handling), index 2 is the user code that called it.
        Restructuring that call chain breaks this lookup, so keep the depth intact.
        :return: tuple of (file name, line number, function name) of the original caller
        """
        stack = inspect.stack()
        caller_frame = stack[2][0]
        file_name = caller_frame.f_code.co_filename
        line_number = caller_frame.f_lineno
        function_name = stack[2][3]
        return file_name, line_number, function_name
111
+ def create_task_execution_steps(self, step_details: list):
112
+ """
113
+ Check if the given steps already exists in the task_execution_steps table. If not, update or insert the values in the table
114
+ :param step_details: list of dicts. Each dict must contain task details according to required_fields.
115
+ Example: step_details = [
116
+ {'nr': 1, 'description': 'test'},
117
+ {'nr': 2, 'description': 'test2'}
118
+ ]
119
+ :return: error (str) or response of mysql
120
+ """
121
+ warnings.warn("Execution steps are deprecated, please stop calling this method. It does nothing anymore", DeprecationWarning)
122
+
123
    def _start_task(self):
        """
        Mark this task as RUNNING in the scheduler tables so the scheduler does not start it again
        while it runs. For local runs the scheduler has not created a task_scheduler_log row, so one
        is inserted here first.
        NOTE(review): __init__ only calls this when mysql_enabled is True, which implies
        started_local is False — the INSERT branch below looks unreachable from __init__ (and
        task_id would be None there, rendering 'None' into the SQL); confirm whether any other
        caller relies on it.
        :return: nothing (database errors propagate to the caller)
        """
        # If the task is started from a local instance (not the task_scheduler), create a start
        # log row in the task_scheduler_log
        if self.started_local:
            self.mysql.raw_query(f"INSERT INTO `task_scheduler_log` (reload_id, task_id, reload_status, started_at, finished_at) VALUES ({self.run_id}, {self.task_id}, 'Running', '{self.started_at}', null)", insert=True)

        # Reset the step counter and the run_instant flag, and flag the task as RUNNING
        self.mysql.update('task_scheduler', ['status', 'step_nr', 'run_instant'], ['RUNNING', 1, 0], f'WHERE `id` = {self.task_id}')
134
+ def db_variable(self, variable_name: str):
135
+ """
136
+ Get a value from the task_variables table corresponding with the given name. If temp value is filled, it will
137
+ (run_instant = 1), then the temp_value will be returned. This is to give the possibility for users in the frontend to run
138
+ a task once manual with other values then normal without overwriting the normal values.
139
+ :param variable_name: the name of the variable
140
+ :return: the value of the given variable.
141
+ """
142
+ warnings.deprecated("Use self.brynq.interfaces.get_variables() instead")
143
+
144
+ variable = self.brynq.interfaces.get_variables(variable_name=variable_name)
145
+
146
+ return variable
147
+
148
    def write_execution_log(self, message: str, data = None, loglevel: str = 'INFO', full_extract: bool = False):
        """
        Write a log line to stdout and, when running on the platform, to the task_execution_log table.
        Whether the line is stored depends on the task's configured loglevel (self.loglevel) versus
        the level of this line; when MySQL is unreachable the line is buffered to a local JSON file
        and replayed later by _process_local_mysql_logs.
        :param message: A string with a message for the log; single quotes are stripped before storage
        :param data: not used in this method — presumably a legacy payload parameter kept for caller
            compatibility; TODO confirm before removing
        :param loglevel: one of DEBUG, INFO, ERROR or CRITICAL (DEBUG is most granular, CRITICAL the least)
        :param full_extract: not used in this method — documented elsewhere as preventing payload logging
            in ElasticSearch; verify against callers
        :return: nothing
        """
        # Validate if the provided loglevel is valid
        allowed_loglevels = ['DEBUG', 'INFO', 'ERROR', 'CRITICAL']
        if loglevel not in allowed_loglevels:
            raise ValueError(f"You\'ve entered a not allowed loglevel. Choose one of: {','.join(allowed_loglevels)}")

        # Get the line number from where the log line is executed (the caller of this method).
        file_name, line_number, function_name = self.__get_caller_info()
        print('{} at line: {}'.format(message, line_number))

        # Count the errors so finish_task can report FinishedWithErrors and email the count
        if loglevel == 'ERROR' or loglevel == 'CRITICAL':
            self.error_count += 1

        # Write the log line to the MySQL database, depending on the chosen loglevel of the task
        if self.mysql_enabled:
            mysql_log_data = {
                'reload_id': self.run_id,
                'task_id': self.task_id,
                'log_level': loglevel,
                'created_at': datetime.datetime.now(),
                'line_number': line_number,
                # Strip single quotes: the message is embedded in a quoted SQL literal below
                'message': re.sub("[']", '', message)
            }
            if self.mysql_reachable:
                try:
                    query = f"INSERT INTO `task_execution_log` (reload_id, task_id, log_level, created_at, line_number, message) VALUES ({mysql_log_data['reload_id']}, {mysql_log_data['task_id']}, '{mysql_log_data['log_level']}', '{mysql_log_data['created_at']}', {mysql_log_data['line_number']}, '{mysql_log_data['message']}')"
                    # Store only when this line's level is at or above the task's configured threshold
                    # (DEBUG stores everything, CRITICAL only critical lines).
                    if self.loglevel == 'DEBUG' or (self.loglevel == 'INFO' and loglevel != 'DEBUG') or (self.loglevel == 'ERROR' and loglevel in ['ERROR', 'CRITICAL']) or (self.loglevel == 'CRITICAL' and loglevel == 'CRITICAL'):
                        self.mysql.raw_query(query, insert=True)
                except (pymysql.err.OperationalError, pymysql.err.InterfaceError) as e:
                    # Connection dropped mid-run: flip the flag so later lines go straight to the
                    # local buffer, and buffer this one as well.
                    print(f"MySQL connection lost or closed during logging: {e}")
                    self.mysql_reachable = False
                    self._save_log_locally(mysql_log_data, 'mysql')
                except Exception as e:
                    print(f"Error during logging to MySQL: {e}")
                    self._save_log_locally(mysql_log_data, 'mysql')
            else:
                # MySQL was already unreachable; buffer locally for later replay
                self._save_log_locally(mysql_log_data, 'mysql')
194
+ def update_execution_step(self, step_number: int):
195
+ """
196
+ Update the current step number in the task_scheduler table so that user's in the frontend of BrynQ can see where a task is at any moment
197
+ :param step_number: Give only a number
198
+ :return: nothing
199
+ """
200
+ # Update the step number in the task_scheduler table
201
+ warnings.warn("Execution steps are deprecated, please stop calling this method. It does nothing anymore", DeprecationWarning)
202
+
203
+ def error_handling(self, e: Exception, breaking=True, send_to_teams=False):
204
+ """
205
+ This function handles errors that occur in the scheduler. Logs the traceback, updates run statuses and notifies users
206
+ :param e: the Exception that is to be handled
207
+ :param task_id: The scheduler task id
208
+ :param mysql_con: The connection which is used to update the scheduler task status
209
+ :param logger: The logger that is used to write the logging status to
210
+ :param breaking: Determines if the error is breaking or code will continue
211
+ :param started_at: Give the time the task is started
212
+ :return: nothing
213
+ """
214
+ # Get the linenumber from where the logline is executed.
215
+ file_name, line_number, function_name = self.__get_caller_info()
216
+
217
+ # Format error to a somewhat readable format
218
+ exc_type, exc_obj, exc_tb = sys.exc_info()
219
+ error = str(e)[:400].replace('\'', '').replace('\"', '') + ' | Line: {}'.format(exc_tb.tb_lineno)
220
+
221
+ if self.mysql_enabled:
222
+ try:
223
+ now = datetime.datetime.now()
224
+ # Log to log table in the database
225
+ mysql_log_data = {
226
+ 'reload_id': self.run_id,
227
+ 'task_id': self.task_id,
228
+ 'log_level': 'CRITICAL',
229
+ 'created_at': now,
230
+ 'line_number': exc_tb.tb_lineno,
231
+ 'message': error
232
+ }
233
+ self.error_count += 1
234
+ # Get scheduler task details for logging
235
+ task_details = \
236
+ self.mysql.select('task_scheduler, data_interfaces', 'data_interfaces.docker_image, data_interfaces.runfile_path', 'WHERE task_scheduler.data_interface_id = data_interfaces.id AND task_scheduler.id = {}'.format(self.task_id))[0]
237
+ taskname = task_details[0]
238
+ customer = task_details[1].split('/')[-1].split('.')[0]
239
+
240
+ query = f"INSERT INTO `task_execution_log` (reload_id, task_id, log_level, created_at, line_number, message) VALUES ({mysql_log_data['reload_id']}, {mysql_log_data['task_id']}, '{mysql_log_data['log_level']}', '{mysql_log_data['created_at']}', {mysql_log_data['line_number']}, '{mysql_log_data['message']}')"
241
+ self.mysql.raw_query(query, insert=True)
242
+
243
+ if send_to_teams:
244
+ Functions.send_error_to_teams(database=customer, task_number=self.task_id, task_title=taskname)
245
+ if breaking:
246
+ # Set scheduler status to failed
247
+ self.mysql.update('task_scheduler', ['status', 'last_reload', 'last_error_message', 'step_nr'],
248
+ ['IDLE', now, 'Failed', 0],
249
+ f'WHERE `id` = {self.task_id}')
250
+
251
+ self.mysql.update(table='task_scheduler_log',
252
+ columns=['reload_status', 'finished_at'],
253
+ values=['Failed', f'{now}'],
254
+ filter=f'WHERE `reload_id` = {self.run_id}')
255
+ if self.email_after_errors:
256
+ self.email_errors(failed=True)
257
+
258
+ # Remove the temp values from the variables table (legacy), if it doesn't exist, ignore the error. In that case it will already be flushed by the API endpoint
259
+ try:
260
+ self.mysql.raw_query(f'UPDATE `task_variables` SET temp_value = null WHERE task_id = {self.task_id}', insert=True)
261
+ except Exception as e:
262
+ pass
263
+
264
+ # Start the chained tasks if it there are tasks which should start if this one is failed
265
+ self.start_chained_tasks(finished_task_status='FAILED')
266
+ except (pymysql.err.OperationalError, pymysql.err.InterfaceError) as e:
267
+ print(f"MySQL connection lost or closed during logging: {e}")
268
+ self.mysql_reachable = False
269
+ self._save_log_locally(mysql_log_data, 'mysql')
270
+ except Exception as e:
271
+ print(f"Error during logging to MySQL: {e}")
272
+ self._save_log_locally(mysql_log_data, 'mysql')
273
+
274
+ # if breaking, reraise for clear traceback (local development) or just print if not breaking
275
+ if breaking:
276
+ raise e
277
+ else:
278
+ print(error)
279
+
280
    def finish_task(self, reload_instant=False, log_limit: Optional[int] = 10000, log_date_limit: datetime.date = None):
        """
        At the end of the script, write the outcome to the database: whether the task finished with or
        without errors, email the customer's contact person(s) when configured, flush legacy temp
        variables, start chained tasks and prune the execution log.
        :param reload_instant: If True the task is flagged to start again immediately after it finishes
        :param log_date_limit: When set, only log entries created on/after this date are kept
        :param log_limit: Maximum number of non-error log entries kept per task; older entries are deleted
        :return: nothing
        """
        if self.mysql_enabled:
            # reload_instant adds 'run_instant'/'next_reload' to the success update below, which makes
            # the scheduler restart the task right away.
            field = ['run_instant', 'next_reload'] if reload_instant else []
            value = ['1', datetime.datetime.now()] if reload_instant else []
            if self.error_count > 0:
                # NOTE(review): field/value (reload_instant) are only applied on the success branch
                # below — a run that finished with errors is never instantly restarted; confirm intended.
                self.mysql.update('task_scheduler', ['status', 'last_reload', 'last_error_message', 'step_nr'],
                                  ['IDLE', datetime.datetime.now(), 'FinishedWithErrors', 0],
                                  'WHERE `id` = {}'.format(self.task_id))
                self.mysql.update(table='task_scheduler_log',
                                  columns=['reload_status', 'finished_at'],
                                  values=['FinishedWithErrors', f'{datetime.datetime.now()}'],
                                  filter=f'WHERE `reload_id` = {self.run_id}')
                # If self.email_after_errors is set, email the number of errors to the configured user(s)
                if self.email_after_errors:
                    self.email_errors(failed=False)
            else:
                # NOTE(review): 'FinishedSucces' (task_scheduler) vs 'FinishedSuccess' (log table) —
                # the spelling difference is preserved because the frontend may match on these exact values.
                self.mysql.update(table='task_scheduler',
                                  columns=['status', 'last_reload', 'last_error_message', 'step_nr', 'stopped_by_user'] + field,
                                  values=['IDLE', datetime.datetime.now(), 'FinishedSucces', 0, 0] + value,
                                  filter='WHERE `id` = {}'.format(self.task_id))

                self.mysql.update(table='task_scheduler_log',
                                  columns=['reload_status', 'finished_at'],
                                  values=['FinishedSuccess', f'{datetime.datetime.now()}'],
                                  filter=f'WHERE `reload_id` = {self.run_id}')

            # Remove the temp values from the variables table if it exists (legacy)
            try:
                self.mysql.raw_query(f'UPDATE `task_variables` SET temp_value = null WHERE task_id = {self.task_id}', insert=True)
            except Exception as e:
                pass

            # Start the task(s) chained on a successful finish of this one
            self.start_chained_tasks(finished_task_status='SUCCESS')

            # Clean up the execution log: keep ERROR/CRITICAL lines always, and of the remaining lines
            # keep only the newest log_limit entries and/or entries newer than log_date_limit.
            # The date filter is built separately because of the many quotation marks involved in
            # the delete filter as a whole.
            print(f"Deleting logs with filters: Date limit: {log_date_limit}, Max no. of entries: {log_limit} INFO logs")
            log_date_limit_filter = f"AND created_at >= \'{log_date_limit.strftime('%Y-%m-%d')}\'" if log_date_limit is not None else ''
            log_limit_filter = f"LIMIT {log_limit}" if log_limit is not None else ""
            delete_filter = f"WHERE task_id = {self.task_id} " \
                            f"AND log_level NOT IN ('CRITICAL', 'ERROR') " \
                            f"AND reload_id NOT IN (SELECT reload_id FROM (SELECT reload_id FROM `task_execution_log` WHERE task_id = {self.task_id} " \
                            f"{log_date_limit_filter} " \
                            f"ORDER BY created_at DESC {log_limit_filter}) temp)"
            resp = self.mysql.delete(table="task_execution_log", filter=delete_filter)
            print(resp)
        print(f'{datetime.datetime.now()} - Task finished')
338
+ def start_chained_tasks(self, finished_task_status: str):
339
+ if self.mysql_enabled:
340
+ # only start chained tasks when trigger is on other task, otherwise this has changed in the db
341
+ filter = f'WHERE start_after_task_id = \'{self.task_id}\' AND start_after_preceding_task = \'{finished_task_status}\' AND task_type = \'OTHER_TASK\''
342
+ response = self.mysql.select(table='task_scheduler', selection='id', filter=filter)
343
+ if len(response) > 0:
344
+ tasks_to_run = [str(task[0]) for task in response]
345
+ self.mysql.update(table='task_scheduler', columns=['run_instant'], values=['1'], filter=f'WHERE id IN({",".join(tasks_to_run)})')
346
+ else:
347
+ print("Unable to start chained tasks, MySQL is disabled")
348
+
349
    def email_errors(self, failed):
        """
        Email the configured recipients that the task failed or finished with errors.
        Recipients come from the task variable 'email_errors_to' (comma separated); when that
        variable is absent, no mail is sent.
        :param failed: True when the run was aborted (task failed), False when it finished with errors
        :return: nothing
        """
        # The addresses to mail to are stored in the task variable 'email_errors_to'
        email_variable = self.brynq.interfaces.get_variables(variable_name='email_errors_to')
        if email_variable is not None:
            email_to = email_variable.split(',')
            # NOTE(review): str.split always returns a list, so this isinstance check is always True;
            # kept as-is to preserve behavior.
            if isinstance(email_to, list):
                # Mandrill requires a name with every address, so wrap each bare address
                email_list = []
                for i in email_to:
                    email_list.append({'name': 'BrynQ User', 'mail': i.strip()})

                # Retrieve the task title and the finished_at time from task_scheduler joined with
                # the data_interfaces table
                response = self.mysql.select(
                    table='task_scheduler LEFT JOIN data_interfaces ON task_scheduler.data_interface_id = data_interfaces.id ',
                    selection="title, last_reload",
                    filter=f'WHERE task_scheduler.id = {self.task_id}'
                )
                task = response[0][0]
                finished_at = response[0][1]

                # Compose subject and body depending on whether the run failed or only had errors
                if failed:
                    subject = f'Task \'{task}\' has failed'
                    content = f'Task \'{task}\' with task ID \'{self.task_id}\' failed during its last run and was stopped at {finished_at}. ' \
                              f'The task is failed. ' \
                              f'to visit the BrynQ scheduler, click here: <a href="https://app.brynq.com/interfaces/">here</a>. Here you can find the logs and find more information on why this task had failed.'
                else:
                    subject = f'Task \'{task}\' is finished with errors'
                    content = f'Task \'{task}\' with ID \'{self.task_id}\' has runned and is finished at {finished_at}. ' \
                              f'The task is finished with {self.error_count} errors. ' \
                              f'to visit the BrynQ scheduler, click here: <a href="https://app.brynq.com/interfaces/">here</a>. Here you can find the logs and find more information on why this task had some errors.'
                MailClient().send_mail(email_to=email_list, subject=subject, content=content, language='EN')
382
+ def _save_log_locally(self, payload, system):
383
+ log_file_path = os.path.join(self.local_log_dir, f'{system}_log_{self.run_id}.json')
384
+ try:
385
+ if os.path.exists(log_file_path):
386
+ with open(log_file_path, 'r') as f:
387
+ logs = json.load(f)
388
+ else:
389
+ logs = []
390
+ logs.append(payload)
391
+ with open(log_file_path, 'w') as f:
392
+ json.dump(logs, f, default=str)
393
+ except Exception as e:
394
+ print(f"Error saving log locally: {e}")
395
+
396
+ def _process_local_mysql_logs(self):
397
+ mysql_log_files = [f for f in os.listdir(self.local_log_dir) if f.startswith('mysql_log_')]
398
+ for log_file in mysql_log_files:
399
+ log_file_path = os.path.join(self.local_log_dir, log_file)
400
+ try:
401
+ with open(log_file_path, 'r') as f:
402
+ logs = json.load(f)
403
+ # Process logs
404
+ for log_entry in logs:
405
+ self._write_log_to_mysql(log_entry)
406
+ # Remove the log file after processing
407
+ os.remove(log_file_path)
408
+ except Exception as e:
409
+ print(f"Error processing MySQL log file {log_file}: {e}")
410
+
411
+ def _write_log_to_mysql(self, log_entry):
412
+ """
413
+ Insert a log entry dictionary into the task_execution_log table in MySQL.
414
+ :param log_entry: dict with keys matching the columns of task_execution_log
415
+ """
416
+ try:
417
+ query = (
418
+ "INSERT INTO `task_execution_log` "
419
+ "(reload_id, task_id, log_level, created_at, line_number, message) "
420
+ "VALUES ({reload_id}, {task_id}, '{log_level}', '{created_at}', {line_number}, '{message}')"
421
+ ).format(
422
+ reload_id=log_entry['reload_id'],
423
+ task_id=log_entry['task_id'],
424
+ log_level=log_entry['log_level'],
425
+ created_at=log_entry['created_at'],
426
+ line_number=log_entry['line_number'],
427
+ message=str(log_entry['message']).replace("'", "")
428
+ )
429
+ self.mysql.raw_query(query, insert=True)
430
+ except Exception as e:
431
+ print(f"Error writing log entry to MySQL: {e}")
@@ -0,0 +1,19 @@
1
+ Metadata-Version: 2.4
2
+ Name: brynq_sdk_task_scheduler
3
+ Version: 4.0.8
4
+ Summary: Code to execute tasks in BrynQ.com with the task scheduler
5
+ Author: BrynQ
6
+ Author-email: support@brynq.com
7
+ License: BrynQ License
8
+ Requires-Dist: brynq-sdk-brynq<5,>=4
9
+ Requires-Dist: brynq-sdk-functions<3,>=2
10
+ Requires-Dist: brynq-sdk-mysql<4,>=3
11
+ Requires-Dist: brynq-sdk-mandrill<4,>=2
12
+ Dynamic: author
13
+ Dynamic: author-email
14
+ Dynamic: description
15
+ Dynamic: license
16
+ Dynamic: requires-dist
17
+ Dynamic: summary
18
+
19
+ Code to execute tasks in the BrynQ.com platform with the task scheduler
@@ -0,0 +1,6 @@
1
+ brynq_sdk_task_scheduler/__init__.py,sha256=sHbSeANkfAh0VgPjouuPQES2V1n1ppzjUeFaqUdgy_E,41
2
+ brynq_sdk_task_scheduler/task_scheduler.py,sha256=iAWB1Ydr4C4ShiFq8RZReOlT7itjYsqX9jyn42vNe_g,25306
3
+ brynq_sdk_task_scheduler-4.0.8.dist-info/METADATA,sha256=WK5xJ_8vi2BzaNoy_DHsu0ZkAwMyjBDknK7-uZdbUGs,549
4
+ brynq_sdk_task_scheduler-4.0.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
5
+ brynq_sdk_task_scheduler-4.0.8.dist-info/top_level.txt,sha256=MFxHZowVM2wMSGNPlF3IDgdJHhTgG_G4ccS76pXxMhw,25
6
+ brynq_sdk_task_scheduler-4.0.8.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ brynq_sdk_task_scheduler