avtomatika-1.0b10-py3-none-any.whl → avtomatika-1.0b11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- avtomatika/blueprint.py +8 -1
- avtomatika/executor.py +38 -18
- avtomatika/s3.py +2 -3
- avtomatika/services/worker_service.py +22 -19
- avtomatika/storage/base.py +14 -0
- avtomatika/storage/memory.py +11 -0
- avtomatika/storage/redis.py +16 -4
- {avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/METADATA +1 -2
- {avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/RECORD +12 -12
- {avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/WHEEL +0 -0
- {avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/licenses/LICENSE +0 -0
- {avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/top_level.txt +0 -0
avtomatika/blueprint.py
CHANGED
@@ -131,7 +131,14 @@ class StateMachineBlueprint:
         self.name = name
         self.api_endpoint = api_endpoint
         self.api_version = api_version
-        self.data_stores: dict[str, AsyncDictStore] =
+        self.data_stores: dict[str, AsyncDictStore] = {}
+        if data_stores:
+            for ds_name, ds_data in data_stores.items():
+                if isinstance(ds_data, AsyncDictStore):
+                    self.data_stores[ds_name] = ds_data
+                else:
+                    self.data_stores[ds_name] = AsyncDictStore(ds_data)
+
         self.handlers: dict[str, Callable] = {}
         self.aggregator_handlers: dict[str, Callable] = {}
         self.conditional_handlers: list[ConditionalHandler] = []
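The constructor change means the data_stores argument may now mix ready-made AsyncDictStore instances with plain dicts, which get wrapped on the fly. A minimal standalone sketch of that normalization, with a stub standing in for avtomatika's AsyncDictStore (assumed shape, illustration only):

# Stub standing in for avtomatika's AsyncDictStore (assumed interface).
class AsyncDictStore:
    def __init__(self, data: dict | None = None) -> None:
        self._data = dict(data or {})

def normalize(data_stores: dict | None) -> dict[str, AsyncDictStore]:
    stores: dict[str, AsyncDictStore] = {}
    for name, value in (data_stores or {}).items():
        # Pass instances through unchanged; wrap raw dicts.
        stores[name] = value if isinstance(value, AsyncDictStore) else AsyncDictStore(value)
    return stores

stores = normalize({"config": {"retries": 3}, "cache": AsyncDictStore()})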
avtomatika/executor.py
CHANGED
@@ -238,6 +238,9 @@ class JobExecutor:
                     action_factory.sub_blueprint_to_run,
                     duration_ms,
                 )
+            elif job_state["current_state"] in blueprint.end_states:
+                status = JOB_STATUS_FINISHED if job_state["current_state"] == "finished" else JOB_STATUS_FAILED
+                await self._handle_terminal_reached(job_state, status, duration_ms)
 
         except Exception as e:
             # This catches errors within the handler's execution.
@@ -248,6 +251,40 @@ class JobExecutor:
             if message_id in self._processing_messages:
                 self._processing_messages.remove(message_id)
 
+    async def _handle_terminal_reached(
+        self,
+        job_state: dict[str, Any],
+        status: str,
+        duration_ms: int,
+    ) -> None:
+        job_id = job_state["id"]
+        current_state = job_state["current_state"]
+        logger.info(f"Job {job_id} reached terminal state '{current_state}' with status '{status}'")
+
+        await self.history_storage.log_job_event(
+            {
+                "job_id": job_id,
+                "state": current_state,
+                "event_type": "job_completed",
+                "duration_ms": duration_ms,
+                "context_snapshot": job_state,
+            },
+        )
+
+        job_state["status"] = status
+        await self.storage.save_job_state(job_id, job_state)
+
+        # Clean up S3 files if service is available
+        s3_service = self.engine.app.get(S3_SERVICE_KEY)
+        if s3_service:
+            task_files = s3_service.get_task_files(job_id)
+            if task_files:
+                create_task(task_files.cleanup())
+
+        await self._check_and_resume_parent(job_state)
+        event_type = "job_finished" if status == JOB_STATUS_FINISHED else "job_failed"
+        await self.engine.send_job_webhook(job_state, event_type)
+
     async def _handle_transition(
         self,
         job_state: dict[str, Any],
@@ -270,28 +307,11 @@ class JobExecutor:
             },
         )
 
-        # When transitioning to a new state, reset the retry counter.
         job_state["retry_count"] = 0
         job_state["current_state"] = next_state
         job_state["status"] = JOB_STATUS_RUNNING
         await self.storage.save_job_state(job_id, job_state)
-
-        if next_state not in TERMINAL_STATES:
-            await self.storage.enqueue_job(job_id)
-        else:
-            logger.info(f"Job {job_id} reached terminal state {next_state}")
-
-            # Clean up S3 files if service is available
-            s3_service = self.engine.app.get(S3_SERVICE_KEY)
-            if s3_service:
-                task_files = s3_service.get_task_files(job_id)
-                if task_files:
-                    # Run cleanup in background to not block response
-                    create_task(task_files.cleanup())
-
-            await self._check_and_resume_parent(job_state)
-            event_type = "job_finished" if next_state == JOB_STATUS_FINISHED else "job_failed"
-            await self.engine.send_job_webhook(job_state, event_type)
+        await self.storage.enqueue_job(job_id)
 
     async def _handle_dispatch(
         self,
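Taken together, the three executor hunks move all terminal-state bookkeeping (history event, final status, S3 cleanup, parent resumption, webhook) out of _handle_transition and into the new _handle_terminal_reached, which the run loop invokes when the current state is one of blueprint.end_states. A simplified sketch of _handle_transition's new shape (names taken from the diff; the surrounding engine and storage plumbing is assumed):

# Simplified: the transition path no longer special-cases terminal states.
async def handle_transition_sketch(storage, job_state, next_state):
    job_state["retry_count"] = 0           # reset retries on every transition
    job_state["current_state"] = next_state
    job_state["status"] = "running"        # JOB_STATUS_RUNNING in the real code
    await storage.save_job_state(job_state["id"], job_state)
    await storage.enqueue_job(job_state["id"])  # unconditional re-enqueue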
avtomatika/s3.py
CHANGED
@@ -335,12 +335,11 @@ class S3Service:
         try:
             self._store = S3Store(
                 bucket=self.config.S3_DEFAULT_BUCKET,
-
-
+                aws_access_key_id=self.config.S3_ACCESS_KEY,
+                aws_secret_access_key=self.config.S3_SECRET_KEY,
                 region=self.config.S3_REGION,
                 endpoint=self.config.S3_ENDPOINT_URL,
                 allow_http="http://" in self.config.S3_ENDPOINT_URL,
-                force_path_style=True,
             )
             self._semaphore = Semaphore(self.config.S3_MAX_CONCURRENCY)
             logger.info(
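The s3.py hunk renames the credential keyword arguments passed to S3Store and drops force_path_style=True; allow_http continues to be derived from the endpoint URL. A trivial illustration of that derivation (the endpoint value here is hypothetical):

# Hypothetical local MinIO endpoint, for illustration only:
endpoint_url = "http://localhost:9000"
allow_http = "http://" in endpoint_url  # True for plain-HTTP endpoints, False for https://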
avtomatika/services/worker_service.py
CHANGED
@@ -104,7 +104,6 @@ class WorkerService:
 
         job_id = result_payload.get("job_id")
         task_id = result_payload.get("task_id")
-        result_data = result_payload.get("result", {})
 
         if not job_id or not task_id:
             raise ValueError("job_id and task_id are required")
@@ -113,25 +112,33 @@ class WorkerService:
         if not job_state:
             raise LookupError("Job not found")
 
+        result_status = result_payload.get("status", TASK_STATUS_SUCCESS)
+        worker_data_content = result_payload.get("data")
+
         if job_state.get("status") == JOB_STATUS_WAITING_FOR_PARALLEL:
             await self.storage.remove_job_from_watch(f"{job_id}:{task_id}")
-            job_state.setdefault("aggregation_results", {})[task_id] = result_data
 
-
-
-            branches.
+            def _update_parallel_results(state: dict[str, Any]) -> dict[str, Any]:
+                state.setdefault("aggregation_results", {})[task_id] = result_payload
+                branches = state.setdefault("active_branches", [])
+                if task_id in branches:
+                    branches.remove(task_id)
+
+                if not branches:
+                    state["status"] = JOB_STATUS_RUNNING
+                    state["current_state"] = state["aggregation_target"]
+                return state
 
-
+            updated_job_state = await self.storage.update_job_state_atomic(job_id, _update_parallel_results)
+
+            if not updated_job_state.get("active_branches"):
                 logger.info(f"All parallel branches for job {job_id} have completed.")
-                job_state["status"] = JOB_STATUS_RUNNING
-                job_state["current_state"] = job_state["aggregation_target"]
-                await self.storage.save_job_state(job_id, job_state)
                 await self.storage.enqueue_job(job_id)
             else:
+                remaining = len(updated_job_state["active_branches"])
                 logger.info(
-                    f"Branch {task_id} for job {job_id} completed. Waiting for {
+                    f"Branch {task_id} for job {job_id} completed. Waiting for {remaining} more.",
                 )
-                await self.storage.save_job_state(job_id, job_state)
 
         return "parallel_branch_result_accepted"
@@ -148,14 +155,12 @@ class WorkerService:
                 "event_type": "task_finished",
                 "duration_ms": duration_ms,
                 "worker_id": authenticated_worker_id,
-                "context_snapshot": {**job_state, "result":
+                "context_snapshot": {**job_state, "result": result_payload},
             },
         )
 
-        result_status = result_data.get("status", TASK_STATUS_SUCCESS)  # Default to success? Constant?
-
         if result_status == TASK_STATUS_FAILURE:
-            return await self._handle_task_failure(job_state, task_id,
+            return await self._handle_task_failure(job_state, task_id, result_payload)
 
         if result_status == TASK_STATUS_CANCELLED:
             logger.info(f"Task {task_id} for job {job_id} was cancelled by worker.")
@@ -171,13 +176,11 @@ class WorkerService:
             return "result_accepted_cancelled"
 
         transitions = job_state.get("current_task_transitions", {})
-        result_status = result_data.get("status", TASK_STATUS_SUCCESS)
         next_state = transitions.get(result_status)
 
         if next_state:
             logger.info(f"Job {job_id} transitioning based on worker status '{result_status}' to state '{next_state}'")
 
-            worker_data_content = result_data.get("data")
             if worker_data_content and isinstance(worker_data_content, dict):
                 if "state_history" not in job_state:
                     job_state["state_history"] = {}
@@ -202,8 +205,8 @@ class WorkerService:
         await self.storage.save_job_state(job_id, job_state)
         return "result_accepted_failure"
 
-    async def _handle_task_failure(self, job_state: dict, task_id: str,
-        error_details =
+    async def _handle_task_failure(self, job_state: dict, task_id: str, result_payload: dict) -> str:
+        error_details = result_payload.get("error", {})
         error_type = ERROR_CODE_TRANSIENT
         error_message = "No error details provided."
 
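The worker_service.py change replaces a read-modify-write on job_state (load, mutate, save_job_state) with a mutation callback handed to the new storage.update_job_state_atomic, so two parallel branch results arriving at once can no longer overwrite each other's removal from active_branches. A self-contained toy demonstrating the callback pattern (ToyStorage is invented for illustration and only mimics the memory backend):

import asyncio

class ToyStorage:
    def __init__(self) -> None:
        self._jobs: dict[str, dict] = {"job-1": {"active_branches": ["t1", "t2"]}}
        self._lock = asyncio.Lock()

    async def update_job_state_atomic(self, job_id: str, update_callback):
        async with self._lock:  # the whole read-modify-write happens under one lock
            state = update_callback(self._jobs.get(job_id, {}))
            self._jobs[job_id] = state
            return state

async def main() -> None:
    storage = ToyStorage()

    def finish_branch(task_id: str):
        def _update(state: dict) -> dict:
            branches = state.setdefault("active_branches", [])
            if task_id in branches:
                branches.remove(task_id)
            return state
        return _update

    # Two branch results landing concurrently can no longer lose an update.
    await asyncio.gather(
        storage.update_job_state_atomic("job-1", finish_branch("t1")),
        storage.update_job_state_atomic("job-1", finish_branch("t2")),
    )
    print(storage._jobs["job-1"])  # {'active_branches': []}

asyncio.run(main())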
avtomatika/storage/base.py
CHANGED
@@ -90,6 +90,20 @@ class StorageBackend(ABC):
         """
         raise NotImplementedError
 
+    @abstractmethod
+    async def update_job_state_atomic(
+        self,
+        job_id: str,
+        update_callback: Any,
+    ) -> dict[str, Any]:
+        """Atomically update the state of a job using a callback function.
+
+        :param job_id: Unique identifier for the job.
+        :param update_callback: A callable that takes the current state and returns the updated state.
+        :return: The updated full state of the job.
+        """
+        raise NotImplementedError
+
     @abstractmethod
     async def register_worker(
         self,
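The abstract method types update_callback as Any; per the docstring, callers pass a synchronous function from state dict to state dict. A more precise alias, offered here only as a suggestion rather than anything in the package:

from collections.abc import Callable
from typing import Any

# A callback receives the current job state and returns the state to persist.
UpdateCallback = Callable[[dict[str, Any]], dict[str, Any]]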
avtomatika/storage/memory.py
CHANGED
@@ -62,6 +62,17 @@ class MemoryStorage(StorageBackend):
         self._jobs[job_id].update(update_data)
         return self._jobs[job_id]
 
+    async def update_job_state_atomic(
+        self,
+        job_id: str,
+        update_callback: Any,
+    ) -> dict[str, Any]:
+        async with self._lock:
+            current_state = self._jobs.get(job_id, {})
+            updated_state = update_callback(current_state)
+            self._jobs[job_id] = updated_state
+            return updated_state
+
     async def register_worker(
         self,
         worker_id: str,
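Note that the memory backend calls update_callback(current_state) synchronously while holding self._lock, so callbacks must be plain functions; an async def callback would come back as an un-awaited coroutine. For example:

# Correct: a synchronous callback that mutates and returns the state.
def bump_retries(state: dict) -> dict:
    state["retry_count"] = state.get("retry_count", 0) + 1
    return state

# Usage inside async code: await storage.update_job_state_atomic("job-1", bump_retries)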
avtomatika/storage/redis.py
CHANGED
@@ -97,6 +97,19 @@ class RedisStorage(StorageBackend):
         update_data: dict[str, Any],
     ) -> dict[str, Any]:
         """Atomically update the job state in Redis using a transaction."""
+
+        def _merge(state: dict[str, Any]) -> dict[str, Any]:
+            state.update(update_data)
+            return state
+
+        return await self.update_job_state_atomic(job_id, _merge)
+
+    async def update_job_state_atomic(
+        self,
+        job_id: str,
+        update_callback: Any,
+    ) -> dict[str, Any]:
+        """Atomically update the job state in Redis using a transaction and callback."""
         key = self._get_key(job_id)
 
         async with self._redis.pipeline(transaction=True) as pipe:
@@ -105,12 +118,11 @@ class RedisStorage(StorageBackend):
                     await pipe.watch(key)
                     current_state_raw = await pipe.get(key)
                     current_state: dict[str, Any] = self._unpack(current_state_raw) if current_state_raw else {}
-                    current_state
-
+                    updated_state = update_callback(current_state)
                     pipe.multi()
-                    pipe.set(key, self._pack(
+                    pipe.set(key, self._pack(updated_state))
                     await pipe.execute()
-                    return
+                    return updated_state
                 except WatchError:
                     continue
 
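The Redis implementation is the classic WATCH/MULTI optimistic-locking loop: read under WATCH, apply the callback, queue the write, and retry from scratch if another writer touched the key first. A standalone sketch of the same pattern with redis-py's asyncio client (using json in place of the backend's _pack/_unpack):

import json

from redis import asyncio as aioredis
from redis.exceptions import WatchError

async def atomic_update(redis: aioredis.Redis, key: str, update_callback):
    async with redis.pipeline(transaction=True) as pipe:
        while True:
            try:
                await pipe.watch(key)            # invalidate the MULTI if key changes
                raw = await pipe.get(key)        # immediate-mode read while watching
                state = json.loads(raw) if raw else {}
                updated = update_callback(state)
                pipe.multi()                     # switch to buffered/transactional mode
                pipe.set(key, json.dumps(updated))
                await pipe.execute()             # raises WatchError on conflict
                return updated
            except WatchError:
                continue                         # another writer won the race: retry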
{avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: avtomatika
-Version: 1.0b10
+Version: 1.0b11
 Summary: A state-machine based orchestrator for long-running AI and other jobs.
 Author-email: Dmitrii Gagarin <madgagarin@gmail.com>
 Project-URL: Homepage, https://github.com/avtomatika-ai/avtomatika
@@ -58,7 +58,6 @@ Dynamic: license-file
 
 [](https://opensource.org/licenses/MIT)
 [](https://www.python.org/downloads/release/python-3110/)
-[](https://github.com/avtomatika-ai/avtomatika/actions/workflows/ci.yml)
 [](https://github.com/astral-sh/ruff)
 
 Avtomatika is a powerful, state-driven engine for managing complex asynchronous workflows in Python. It provides a robust framework for building scalable and resilient applications by separating process logic from execution logic.
{avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 avtomatika/__init__.py,sha256=D5r3L-H06uxsY_wgfh7u9YR29QvZMer1BlvzjW9Umfo,701
 avtomatika/api.html,sha256=6Sj0vwAUZsbLKwlB58ONAttCB52e8h3fidspLOwMMGE,32894
 avtomatika/app_keys.py,sha256=Zd2TaGPduzyEFJgdPvgSH1skdBx2mX-Prj1ma9fAXRo,1275
-avtomatika/blueprint.py,sha256=
+avtomatika/blueprint.py,sha256=OPJShSdh8asl9G2kWzbFu1CKMzsq15fo37I0eYlISkg,12119
 avtomatika/client_config_loader.py,sha256=zVVHZlxSqZUaNpZ4zoU0T1CFYXdxy-3vKSmPcaFuHSY,2772
 avtomatika/compression.py,sha256=bhA1kw4YrCR3I3kdquZSY0fAzCrRrjtz55uepzLUDKI,2498
 avtomatika/config.py,sha256=27ov8BNbiUpkZ1sjtx3pifRavwcxJ_zUgIdkL_pgqv8,3595
@@ -11,7 +11,7 @@ avtomatika/data_types.py,sha256=D_IUzMW8zMz-_MaqVp9MG53rG37Cb3McyRZuIXxvdlE,1108
 avtomatika/datastore.py,sha256=gJjhZ5kxjF8pmbbPQb_qu3HPUpfy2c6T75KZ-smb_zg,545
 avtomatika/dispatcher.py,sha256=5J5GBWFfaGCGXUkM-2fhMeg2n2nTO0BH3ffkzsnSsaE,8784
 avtomatika/engine.py,sha256=Hb6MLanMjx1GDAfkbNJU-K4RXMuPZQP7_HA_0VR8WMw,20916
-avtomatika/executor.py,sha256=
+avtomatika/executor.py,sha256=bu8_Xmr_hRNsatAKUzypIFWOZQT2yE_gMU4XfGBt4u4,24923
 avtomatika/health_checker.py,sha256=jXYSH4BPeZ4LCxSZV4uXM4BZhGJYgpoAOWQXE8yojLo,2078
 avtomatika/logging_config.py,sha256=cVY8aOeaWncsvkN015WgC74NTF6r55-OA3E1ux8P824,3347
 avtomatika/metrics.py,sha256=tiksK1fFSOMlz8zFu6GT19JTduvxMTNlLu0QFrTHoQI,1866
@@ -19,7 +19,7 @@ avtomatika/py.typed,sha256=CT_L7gw2MLcQY-X0vs-xB5Vr0wzvGo7GuQYPI_qwJE8,65
 avtomatika/quota.py,sha256=DNcaL6k0J1REeP8sVqbY9FprY_3BSr2SxM2Vf4mEqdw,1612
 avtomatika/ratelimit.py,sha256=hFGW5oN9G6_W_jnHmopXW8bRjjzlvanY19MLghsNLE8,1306
 avtomatika/reputation.py,sha256=pK-x9FrPN2Oc2gtPa1AZJHlhvkd7xlRe4orxM2auJJc,3979
-avtomatika/s3.py,sha256=
+avtomatika/s3.py,sha256=Byc5C_KTo0mOErQRlhDJNPZplyqrWxKe4GSeU99Zaqk,14079
 avtomatika/scheduler.py,sha256=F5Kv5Rx34nDd0mE5jxjwpjRg8duDZBEr91N5Y6CNR24,4231
 avtomatika/scheduler_config_loader.py,sha256=38x-4G4yRrhSrLdmZ4aTb7WggE-BcGblKZO7x97nW6Y,1352
 avtomatika/security.py,sha256=eENEUc0OsHm6wN2H-ckGmiaV9qrZSbYsHFCWyYb3aLs,3271
@@ -34,15 +34,15 @@ avtomatika/history/noop.py,sha256=hLzt0RblsrKUtoyQNauOni6jCi-IYCWEPsiR0vh7tho,12
 avtomatika/history/postgres.py,sha256=T0XpDurnh48pPI-2JhB285GdNIexNkCSu8ExhLJzcxc,9538
 avtomatika/history/sqlite.py,sha256=txWax9RVzBQzIZuU-SjHnEXEzBmGzIjqzoVsK2oyiAQ,9252
 avtomatika/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-avtomatika/services/worker_service.py,sha256=
+avtomatika/services/worker_service.py,sha256=lFHVqbXG-4v5Ec17FZCHaB6Uu8U2nWOGpPZOKjzwM00,11596
 avtomatika/storage/__init__.py,sha256=mGRj_40dWZ7R7uYbqC6gCsUWCKHAbZz4ZVIhYg5dT_E,262
-avtomatika/storage/base.py,sha256=
-avtomatika/storage/memory.py,sha256=
-avtomatika/storage/redis.py,sha256=
+avtomatika/storage/base.py,sha256=54II8RfrEQzCT9NH_ECorM9SdvM-e5f-_MRtJInBczw,13856
+avtomatika/storage/memory.py,sha256=DqUd7SQmneJCNd-YaWLQL-Gpz3FwRHFAH6xx2CIIqY4,14915
+avtomatika/storage/redis.py,sha256=fN0e3_2CP-8H1WKTBljXeHMVkrzLXs8znE2YkKbjuy0,20658
 avtomatika/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 avtomatika/utils/webhook_sender.py,sha256=LoJ6z_1p-OngjPYl9Pk1N1t9xrP6-v-7xOg_AmWPuVc,3644
-avtomatika-1.
-avtomatika-1.
-avtomatika-1.
-avtomatika-1.
-avtomatika-1.
+avtomatika-1.0b11.dist-info/licenses/LICENSE,sha256=tqCjw9Y1vbU-hLcWi__7wQstLbt2T1XWPdbQYqCxuWY,1072
+avtomatika-1.0b11.dist-info/METADATA,sha256=3Ok1N9NHujsXKORqv28ZSZmfHpIJ3_GjWGDkXbVyGSU,28432
+avtomatika-1.0b11.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+avtomatika-1.0b11.dist-info/top_level.txt,sha256=gLDWhA_wxHj0I6fG5X8vw9fE0HSN4hTE2dEJzeVS2x8,11
+avtomatika-1.0b11.dist-info/RECORD,,
{avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/WHEEL
File without changes
{avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/licenses/LICENSE
File without changes
{avtomatika-1.0b10.dist-info → avtomatika-1.0b11.dist-info}/top_level.txt
File without changes