ltc-client 0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ltc_client-0.2/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Tin Arm Engineering Ltd.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,28 @@
1
+ Metadata-Version: 2.1
2
+ Name: ltc-client
3
+ Version: 0.2
4
+ Summary: "Client and Worker module for Tin Arm Engineering LTC service"
5
+ Home-page: https://github.com/TinArmEngineering/ltc_client/
6
+ License: MIT
7
+ Author: Martin West
8
+ Author-email: martin@tinarmengineering.com
9
+ Requires-Python: >=3.10,<4.0
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Requires-Dist: Pint (>=0.24,<0.25)
17
+ Requires-Dist: numpy (>=2.0.0,<3.0.0)
18
+ Requires-Dist: pika (>=1.3.2,<2.0.0)
19
+ Requires-Dist: python-logging-rabbitmq (>=2.3.0,<3.0.0)
20
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
21
+ Requires-Dist: tqdm (>=4.66.4,<5.0.0)
22
+ Requires-Dist: webstompy (>=0.1.3,<0.2.0)
23
+ Project-URL: Repository, https://github.com/TinArmEngineering/ltc_client/
24
+ Description-Content-Type: text/markdown
25
+
26
+ # ltc_client
27
+ Node creation tool for TAE workers
28
+
@@ -0,0 +1,2 @@
1
+ # ltc_client
2
+ Node creation tool for TAE workers
@@ -0,0 +1,11 @@
1
# -*- coding: utf-8 -*-

# from ltc_client.worker import StandardWorker, DefaultIdLogFilter, HostnameFilter
from ltc_client.api import Api, NameQuantityPair, Quantity, Unit
from ltc_client.helpers import Machine, Job, decode

__title__ = "TINARM - Node creation tool for TAE workers"
# Keep in sync with the distribution version (pyproject.toml / PKG-INFO say
# 0.2); the previous value "0.1" lagged behind the released package.
__version__ = "0.2"
__author__ = "Martin West, Chris Wallis"
__license__ = "MIT License"
__copyright__ = "Copyright 2023 Tin Arm Engineering Ltd."
@@ -0,0 +1,355 @@
1
+ import logging
2
+ import time
3
+ import requests
4
+ from math import prod
5
+ import pint
6
+
7
# Default verbosity for the module-level logger configured below.
LOGGING_LEVEL = logging.INFO

# Numeric codes for each stage of the TAE job pipeline.  The gaps between
# values leave room for additional intermediate stages.
JOB_STATUS = {
    "New": 0,
    "QueuedForMeshing": 10,
    "WaitingForMesh": 20,
    "QueuedForSimSetup": 21,
    "SimSetup": 22,
    "QueuedForMeshConversion": 25,
    "MeshConversion": 26,
    "QueuedForSolving": 30,
    "Solving": 40,
    "QueuedForPostProcess": 50,
    "PostProcess": 60,
    "Complete": 70,
    "Quarantined": 80,
}

# Inverse mapping: numeric status code -> status name.
STATUS_JOB = {value: key for key, value in JOB_STATUS.items()}

### Configure Logging
logger = logging.getLogger()  # root logger, shared with the host application
logger.setLevel(LOGGING_LEVEL)
30
+
31
+
32
class Unit:
    """A single named unit together with its integer exponent.

    For example metre^2 is Unit("meter", 2).
    """

    def __init__(self, name: str, exponent: int):
        self.name = name
        self.exponent = exponent

    def to_dict(self):
        """Serialise to the API wire format."""
        return {"name": self.name, "exponent": self.exponent}
42
+
43
+
44
class Quantity:
    """
    Represents a quantity with magnitude, units, and shape.

    Args:
        magnitude: The magnitude of the quantity. It can be a single value, a
            list-like object, a numpy array, or even a pint.Quantity.
        units: An iterable of Unit objects (or (name, exponent) tuples)
            representing the units of the quantity.
        shape (Optional): The shape of the quantity. If not provided, it will
            be inferred from the magnitude.

    Attributes:
        magnitude (list): Flat list of magnitude values.
        shape (list): The logical shape of the quantity.
        units (list[Unit]): The units of the quantity.

    Raises:
        ValueError: If an explicit shape does not match the magnitude size.
    """

    def __init__(self, magnitude, units=(None,), shape=None):
        # NOTE: default changed from the mutable list `[None]` to an
        # equivalent immutable tuple (mutable defaults are shared between
        # calls); behaviour is unchanged.
        if isinstance(magnitude, pint.Quantity):
            magnitude, units = magnitude.to_tuple()
        if hasattr(magnitude, "shape"):  # numpy-like array
            if shape is None:
                self.shape = list(magnitude.shape)
                self.magnitude = magnitude.flatten().tolist()
            elif prod(shape) == magnitude.size:
                # Flatten for consistency with the inferred-shape branch;
                # previously a nested list was stored here, so the stored
                # magnitude length disagreed with prod(shape).
                self.magnitude = magnitude.flatten().tolist()
                self.shape = shape
            else:
                raise ValueError(
                    f"Shape {shape} does not match magnitude size {magnitude.size}"
                )

        elif not hasattr(magnitude, "__len__"):  # plain scalar
            self.magnitude = [magnitude]
            self.shape = [1]
        elif shape is None:
            self.shape = [len(magnitude)]
            self.magnitude = magnitude
        elif prod(shape) != len(magnitude):
            raise ValueError(
                f"Shape {shape} does not match magnitude size {len(magnitude)}"
            )
        else:
            self.magnitude = magnitude
            self.shape = shape

        # Accept ready-made Unit objects or (name, exponent) tuples.
        self.units = [u if isinstance(u, Unit) else Unit(*u) for u in units]

    def to_dict(self):
        """
        Converts the Quantity object to a dictionary.

        Returns:
            dict: A dictionary representation of the Quantity object.
        """
        return {
            "magnitude": self.magnitude,
            "shape": self.shape,
            "units": [u.to_dict() for u in self.units],
        }
103
+
104
+
105
class NameQuantityPair:
    """A (section, name, value) triple ready for the TAE data endpoints."""

    def __init__(self, section, name, value: Quantity):
        self.section = section
        self.name = name
        self.value = value

    def to_dict(self):
        """Serialise to the wire format expected by the API."""
        payload = {"section": self.section, "name": self.name}
        payload["value"] = self.value.to_dict()
        return payload
117
+
118
+
119
class Api:
    """
    Thin HTTP client for the TAE (Tin Arm Engineering) REST API.

    Every method calls `raise_for_status()`, so callers should expect
    `requests.HTTPError` on non-2xx responses.
    """

    def __init__(self, root_url, api_key, org_id=None, node_id=None):
        """
        Initialize the API client.

        Args:
            root_url: Base URL of the TAE service.
            api_key: API key appended to every request.
            org_id: Optional organisation id, used when creating jobs.
            node_id: Optional id of the worker node making the requests.
        """

        self._root_url = root_url
        self._api_key = api_key
        self._org_id = org_id
        self._node_id = node_id

        logger.info(f"root_url: {self._root_url}")

    def get_job(self, job_id):
        """
        Get a job from the TAE API.
        """
        response = requests.get(
            url=f"{self._root_url}/jobs/{job_id}?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()

    def create_job(self, job):
        """
        Create a job on the TAE API.

        On a 200 response the generated id is written back onto `job.id`.
        """
        response = requests.post(
            url=f"{self._root_url}/jobs/?apikey={self._api_key}&org_id={self._org_id}",
            json=job.to_api(),
        )
        response.raise_for_status()
        if response.status_code == 200:
            job.id = response.json()["id"]
        return response.json()

    def update_job_status(self, job_id, status, percentage_complete=None):
        """
        Update a job status.

        Args:
            job_id: The job to update.
            status: Numeric status code (see JOB_STATUS).
            percentage_complete: Optional progress figure.  Omitted from the
                query string when None (previously the literal text "None"
                was sent as the parameter value).
        """
        url = (
            f"{self._root_url}/jobs/{job_id}/status/{status}"
            f"?node_id={self._node_id}&apikey={self._api_key}"
        )
        if percentage_complete is not None:
            url += f"&percentage_complete={percentage_complete}"
        logger.info(f"Updating job status: {url}")

        response = requests.put(url=url)
        response.raise_for_status()
        return response.json()

    def get_job_artifact(self, job_id, artifact_id):
        """
        Get a job artifact, raising if it is not present on the job.
        """
        job = self.get_job(job_id)
        for artifact in job["artifacts"]:
            if artifact["id"] == artifact_id:
                return artifact

        raise Exception(f"Artifact {artifact_id} not found on job {job_id}")

    def get_promoted_job_artifact(self, job_id, artifact_id):
        """
        Return the artifact once promoted (its url becomes https).

        Polls every 5 seconds, up to 10 attempts, before giving up.
        """
        # Get the artifact
        artifact = self.get_job_artifact(job_id, artifact_id)

        # If the url starts with https, it's already promoted
        if artifact["url"].startswith("https"):
            return artifact

        for _ in range(10):
            time.sleep(5)
            artifact = self.get_job_artifact(job_id, artifact_id)
            if artifact["url"].startswith("https"):
                return artifact

        raise Exception(
            f"Artifact {artifact_id} on job {job_id} could not be promoted in a reasonable time"
        )

    def create_job_artifact(self, job_id, type, url, promote=False):
        """
        Post an artifact (by url) to a job.
        """
        response = requests.post(
            url=f"{self._root_url}/jobs/{job_id}/artifacts?promote={promote}&apikey={self._api_key}",
            json={
                "created_on_node": self._node_id,
                "type": type,
                "url": url,
            },
        )
        response.raise_for_status()
        return response.json()

    def create_job_artifact_from_file(self, job_id, type, filename, promote=False):
        """
        Post a file on this node to a job as a file:// artifact.

        Bug fix: `filename` was previously ignored (a corrupt placeholder was
        embedded in the url); the artifact url now carries the file path.
        """
        return self.create_job_artifact(
            job_id, type, f"file://{self._node_id}{filename}", promote
        )

    def update_job_artifact(self, job_id, artifact_id, artifact):
        """
        Update an artifact.
        """
        response = requests.put(
            url=f"{self._root_url}/jobs/{job_id}/artifacts/{artifact_id}?apikey={self._api_key}",
            json=artifact,
        )
        response.raise_for_status()
        return response.json()

    def promote_job_artifact(self, job_id, artifact_id):
        """
        Promote an artifact on a job.
        """
        response = requests.put(
            url=f"{self._root_url}/jobs/{job_id}/artifacts/{artifact_id}/promote?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()

    def delete_job(self, job_id):
        """
        Delete a job.  Returns None.
        """
        response = requests.delete(
            url=f"{self._root_url}/jobs/{job_id}?apikey={self._api_key}",
        )
        response.raise_for_status()
        return

    def create_job_data(self, job_id: str, data: NameQuantityPair):
        """
        Create job data.
        """
        response = requests.post(
            url=f"{self._root_url}/jobs/{job_id}/data?apikey={self._api_key}",
            json=data.to_dict(),
        )
        response.raise_for_status()
        return response.json()

    def update_job_data(self, job_id: str, data_name: str, data: NameQuantityPair):
        """
        Update job data.
        """
        response = requests.put(
            url=f"{self._root_url}/jobs/{job_id}/data/{data_name}?apikey={self._api_key}",
            json=data.to_dict(),
        )
        response.raise_for_status()
        return response.json()

    def delete_job_data(self, job_id: str, data_name: str):
        """
        Delete job data.  Returns None.
        """
        response = requests.delete(
            url=f"{self._root_url}/jobs/{job_id}/data/{data_name}?apikey={self._api_key}",
        )
        response.raise_for_status()

    def get_reusable_artifact(self, hash):
        """
        Get a reusable artifact from the TAE API.
        """
        response = requests.get(
            url=f"{self._root_url}/reusable_artifacts/{hash}?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()

    def update_reusable_artifact(self, hash, reusable_artifact):
        """
        Update a reusable_artifact.
        """
        response = requests.put(
            url=f"{self._root_url}/reusable_artifacts/{hash}?apikey={self._api_key}",
            json=reusable_artifact,
        )
        response.raise_for_status()
        return response.json()

    def update_reusable_artifact_url(self, hash, url, mimetype=None):
        """
        Update a reusable_artifact's URL (and optionally its mimetype).
        """
        response = requests.patch(
            url=f"{self._root_url}/reusable_artifacts/{hash}/url?apikey={self._api_key}",
            json={"url": url, "mimetype": mimetype},
        )
        response.raise_for_status()
        return response.json()

    def create_reusable_artifact_data(self, hash, data: NameQuantityPair):
        """
        Create reusable_artifact data.
        """
        response = requests.post(
            url=f"{self._root_url}/reusable_artifacts/{hash}/data?apikey={self._api_key}",
            json=data.to_dict(),
        )
        response.raise_for_status()
        return response.json()

    def promote_reusable_artifact(self, hash):
        """
        Promote a reusable artifact.
        """
        response = requests.put(
            url=f"{self._root_url}/reusable_artifacts/{hash}/promote?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()

    def get_material(self, material_id):
        """
        Get a material from the TAE API.
        """
        response = requests.get(
            url=f"{self._root_url}/materials/{material_id}?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()

    def get_jobs(self):
        """
        Get all jobs.
        """
        response = requests.get(
            url=f"{self._root_url}/jobs?apikey={self._api_key}",
        )
        response.raise_for_status()
        return response.json()
@@ -0,0 +1,239 @@
1
+ from . import Quantity, NameQuantityPair
2
+ from .api import JOB_STATUS, STATUS_JOB
3
+ import random
4
+ import requests
5
+ import pint
6
+ from webstompy import StompListener
7
+ from tqdm.auto import tqdm
8
+ import numpy as np
9
+ import logging
10
+ import uuid
11
+ import asyncio
12
+
13
# Module-level logger for the helpers module.
logger = logging.getLogger(__name__)

# Shared pint unit registry used by decode() below.
q = pint.get_application_registry()
16
+
17
+
18
def decode(enc):
    """Decode a quantity-encoded dict back into a pint Quantity in base units.

    Args:
        enc: dict with keys "magnitude" (list), "shape" (list) and optional
            "units" (list of {"name", "exponent"} dicts), as produced by
            Quantity.to_dict().

    Returns:
        pint.Quantity: scalar when the magnitude has a single element,
        otherwise a float64 numpy array reshaped to enc["shape"].

    Raises:
        Exception: re-raises whatever pint raises for undecodable input,
            after logging the offending payload.
    """
    units = tuple((e["name"], e["exponent"]) for e in enc.get("units", ()))
    if len(enc["magnitude"]) != 1:
        magnitude = np.array(enc["magnitude"], dtype=np.float64).reshape(enc["shape"])
    else:
        magnitude = enc["magnitude"][0]
    enc_tuple = (magnitude, units)
    try:
        quant = q.Quantity.from_tuple(enc_tuple)
        quant.ito_base_units()
    except Exception:
        # Previously a bare `except:` (also caught KeyboardInterrupt etc.)
        # and the log line formatted a generator object; log the actual
        # unit tuples instead.
        logger.error("Error decoding {0}".format((enc["magnitude"], units)))
        raise

    logger.debug("convert {i} -> {o:~P}".format(o=quant, i=enc))
    return quant
48
+
49
+
50
class Machine(object):
    """Bundle of machine geometry (stator/rotor/winding) plus part materials.

    Each geometry argument is a dict of named quantities; every value must
    provide ``to_tuple()`` (e.g. a pint.Quantity).  When no materials mapping
    is supplied, a default material id per part is used.
    """

    def __init__(self, stator, rotor, winding, materials=None):
        self.stator = stator
        self.rotor = rotor
        self.winding = winding
        if materials is not None:
            self.materials = materials
            return
        self.materials = {
            "rotor_lamination": "66018e5d1cd3bd0d3453646f",  # default M230-35A
            "rotor_magnet": "66018e5b1cd3bd0d3453646c",  # default is N35UH
            "rotor_air_L": "6602fb42c4a87c305481e8a6",
            "rotor_air_R": "6602fb42c4a87c305481e8a6",
            "rotor_banding": "6602fb42c4a87c305481e8a6",
            "stator_lamination": "66018e5d1cd3bd0d3453646f",  # default M230-35A
            "stator_slot_wedge": "6602fb7239bfdea291a25dd7",
            "stator_slot_liner": "6602fb5166d3c6adaa8ebe8c",
            "stator_slot_winding": "66018e5d1cd3bd0d34536470",
            "stator_slot_potting": "6602fd41b8e866414fe983ec",
        }

    def __repr__(self) -> str:
        return "Machine({0}, {1}, {2})".format(self.stator, self.rotor, self.winding)

    def to_api(self):
        """Flatten all three sections into a single list of API data dicts."""
        entries = []
        for section_name, section in (
            ("stator", self.stator),
            ("rotor", self.rotor),
            ("winding", self.winding),
        ):
            for key in section:
                quantity = Quantity(*section[key].to_tuple())
                entries.append(NameQuantityPair(section_name, key, quantity).to_dict())
        return entries
93
+
94
+
95
class Job(object):
    """A single simulation job: machine + operating point + simulation setup."""

    def __init__(self, machine: Machine, operating_point, simulation, title=None):
        # A missing title gets a random "<adjective>-<noun>" name.
        self.title = self.generate_title() if title is None else title
        self.type = "electromagnetic_spmbrl_fscwseg"
        self.status = 0
        self.machine = machine
        self.operating_point = operating_point
        self.simulation = simulation

    def __repr__(self) -> str:
        return "Job({0}, {1}, {2})".format(
            self.machine, self.operating_point, self.simulation
        )

    def generate_title(self):
        "gets a random title from the wordlists"
        adjective = self._random_word(
            "https://github.com/taikuukaits/SimpleWordlists/raw/master/Wordlist-Adjectives-All.txt",
            286797,
        )
        noun = self._random_word(
            "https://github.com/taikuukaits/SimpleWordlists/raw/master/Wordlist-Nouns-All.txt",
            871742,
        )
        return f"{adjective}-{noun}"

    @staticmethod
    def _random_word(url, max_offset):
        """Fetch a 500-byte window at a random offset and pick a whole word."""
        offset = random.randint(500, max_offset)
        response = requests.get(
            url,
            headers={
                # Request bytes [offset-500, offset]; the possibly-truncated
                # first and last lines are discarded below.
                "Range": "bytes={0}-{1}".format(offset - 500, offset),
                "accept-encoding": "identity",
            },
        )
        return random.choice(response.text.splitlines()[1:-1])

    def to_api(self):
        """Serialise the job to the POST /jobs payload."""
        payload = {
            "status": 0,
            "title": self.title,
            "type": self.type,
            "tasks": 11,
            "data": [],
            "materials": [],
        }

        for section_name, section in (
            ("operating_point", self.operating_point),
            ("simulation", self.simulation),
        ):
            for key in section:
                pair = NameQuantityPair(
                    section_name, key, Quantity(*section[key].to_tuple())
                )
                payload["data"].append(pair.to_dict())

        payload["data"].extend(self.machine.to_api())
        payload["materials"] = [
            {"part": part, "material_id": material_id}
            for part, material_id in self.machine.materials.items()
        ]
        return payload

    def run(self):
        pass
166
+
167
+
168
class TqdmUpTo(tqdm):
    """Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """
        Move the bar to an absolute position instead of by a delta.

        b : int, optional
            Number of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        return self.update(b * bsize - self.n)  # also sets self.n = b * bsize
183
+
184
+
185
class MyListener(StompListener):
    """STOMP listener that tracks solver progress frames for one job.

    Frames are expected to carry messages of the form
    "<time> - <level> - <...>: Time : <done>/<total>" (see on_message);
    when done == total the listener marks itself finished.
    """

    def __init__(self, job, uid):
        self.job_id = job.id
        self.uid = uid  # subscription id this listener reacts to
        self.done = False  # flips to True once the final timestep is seen
        self._callback_fn = None  # Initialize the callback function

    @property
    def callback_fn(self):
        # Progress sink; invoked as callback_fn(done, tsize=total).
        return self._callback_fn

    @callback_fn.setter
    def callback_fn(self, fn):
        self._callback_fn = fn

    # Rest of the code...

    def on_message(self, frame):
        """Handle a STOMP frame addressed to this listener's subscription.

        Returns self.done for parsed "Time" progress lines, otherwise None.
        """
        headers = {key.decode(): value.decode() for key, value in frame.header}
        if headers["subscription"] == self.uid:
            try:
                time_str, level_str, mesg_str = frame.message.decode().split(" - ")
            except ValueError:
                # Frame did not match the "<time> - <level> - <msg>" shape.
                logger.warning(frame)

            else:
                line = [part.strip() for part in mesg_str.strip().split(":")]
                if line[1] == "Time":
                    done, total = (int(part) for part in line[2].split("/"))
                    self.callback_fn(done, tsize=total)
                    if done == total:
                        self.done = True
                    return self.done
                else:
                    return
220
+
221
+
222
async def async_job_monitor(api, my_job, connection, position):
    """Queue a job for meshing and watch its solver progress until done.

    Subscribes (via STOMP) to the job's solver progress topic, drives a tqdm
    progress bar from the incoming frames, and polls the listener once a
    second.

    Args:
        api: Api instance used to update/read the job status.
        my_job: job object with `id`, `title` and `simulation` attributes.
        connection: webstompy connection to attach the listener to.
        position: row index for the tqdm bar (for stacked bars).

    Returns:
        str: the job's final status name (a STATUS_JOB value).
    """
    uid = str(uuid.uuid4())
    listener = MyListener(my_job, uid)
    connection.add_listener(listener)
    connection.subscribe(destination=f"/topic/{my_job.id}.solver.*.progress", id=uid)
    with TqdmUpTo(
        total=my_job.simulation["timestep_intervals"],
        desc=f"Job {my_job.title}",
        position=position,
        leave=False,
    ) as pbar:
        listener.callback_fn = pbar.update_to

        # Kick the pipeline off; the result is unused but kept for parity.
        j1_result = api.update_job_status(my_job.id, JOB_STATUS["QueuedForMeshing"])

        while not listener.done:
            await asyncio.sleep(1)  # sleep for a second
        return STATUS_JOB[api.get_job(my_job.id)["status"]]
@@ -0,0 +1,393 @@
1
+ import functools
2
+ import json
3
+ import logging
4
+ import os
5
+ import pika
6
+ from socket import gaierror
7
+ import platform
8
+ import ssl
9
+ import sys
10
+ import threading
11
+ import time
12
+
13
+ from pathlib import Path
14
+ from python_logging_rabbitmq import RabbitMQHandler
15
+
16
+ from ltc_client.api import Api
17
+
18
# Unacknowledged-message window per consumer (basic_qos prefetch).
RABBIT_DEFAULT_PRE_FETCH_COUNT = 1
# Exponential backoff bounds for broker reconnects ("RERTY" sic).
RABBIT_FIRST_WAIT_BEFORE_RERTY_SECS = 0.5
RABBIT_MAX_WAIT_BEFORE_RERTY_SECS = 64
LOGGING_LEVEL = logging.INFO


### Configure Logging
logger = logging.getLogger()  # get the root logger?
logger.setLevel(LOGGING_LEVEL)
# Thread-local holding the job id currently processed by each worker thread;
# read by DefaultIdLogFilter so log records carry the right job id.
tld = threading.local()
tld.job_id = "NoJobId"
29
+
30
+
31
+ class HostnameFilter(logging.Filter):
32
+ """Used for logging the hostname
33
+ https://stackoverflow.com/a/55584223/20882432
34
+ """
35
+
36
+ hostname = platform.node()
37
+
38
+ def filter(self, record):
39
+ record.hostname = HostnameFilter.hostname
40
+ return True
41
+
42
+
43
class DefaultIdLogFilter(logging.Filter):
    """Used for logging the job id.

    Reads the module-level thread-local `tld`; records get "NoJobId" when the
    current thread has no job in progress.
    """

    def filter(self, record):
        record.id = getattr(tld, "job_id", "NoJobId")
        return True
52
+
53
+
54
def addLoggingLevel(levelName: str, levelNum: int, methodName: str = None) -> None:
    """
    Comprehensively adds a new logging level to the `logging` module and the
    currently configured logging class.

    `levelName` becomes an attribute of the `logging` module with the value
    `levelNum`. `methodName` becomes a convenience method for both `logging`
    itself and the class returned by `logging.getLoggerClass()` (usually just
    `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
    used.

    To avoid accidental clobberings of existing attributes, this method will
    raise an `AttributeError` if the level name is already an attribute of the
    `logging` module or if the method name is already present, and a
    `ValueError` if `levelNum` is not an integer.

    Example
    -------
    >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5

    """
    methodName = methodName if methodName else levelName.lower()

    logger_class = logging.getLoggerClass()

    # Refuse to clobber anything already present on the logging module or
    # on the active logger class.
    if hasattr(logging, levelName):
        raise AttributeError("{} already defined in logging module".format(levelName))
    if hasattr(logging, methodName):
        raise AttributeError("{} already defined in logging module".format(methodName))
    if hasattr(logger_class, methodName):
        raise AttributeError("{} already defined in logger class".format(methodName))

    # raise a value error if the level number is not an integer
    if not isinstance(levelNum, int):
        raise ValueError("levelNum must be an integer")

    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def _log_for_level(self, message: str, *args, **kwargs) -> None:
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)

    def _log_to_root(message: str, *args, **kwargs) -> None:
        logging.log(levelNum, message, *args, **kwargs)

    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logger_class, methodName, _log_for_level)
    setattr(logging, methodName, _log_to_root)
107
+
108
+
109
+ addLoggingLevel("PROGRESS", logging.INFO + 2)
110
+
111
+ stream_handler = logging.StreamHandler(stream=sys.stdout)
112
+ stream_handler.addFilter(HostnameFilter())
113
+ stream_handler.addFilter(DefaultIdLogFilter())
114
+ stream_handler.setFormatter(
115
+ logging.Formatter(
116
+ "%(asctime)s - %(id)s - %(levelname)s - %(hostname)s - %(filename)s->%(funcName)s() - %(message)s"
117
+ )
118
+ )
119
+
120
+ logger.addHandler(stream_handler)
121
+
122
+
123
class StandardWorker:
    """
    The standard TAE worker class.

    Connects to RabbitMQ (with retry/backoff), declares a durable topic
    exchange, and dispatches incoming job messages to user-supplied callbacks
    on worker threads.  Also installs a RabbitMQ logging handler so job log
    records are streamed to the broker's "amq.topic" exchange.
    """

    def __init__(
        self,
        node_id,
        worker_name,
        queue_host,
        queue_port,
        queue_user,
        queue_password,
        queue_use_ssl,
        queue_exchange,
        queue_prefetch_count=RABBIT_DEFAULT_PRE_FETCH_COUNT,
        x_priority=0,
        projects_path=os.getenv("PROJECTS_PATH"),
    ):
        # NOTE(review): the projects_path default is evaluated once at import
        # time, not per call — confirm this is intended.
        self._threads = []
        self._node_id = node_id
        self._worker_name = worker_name
        self._exchange = queue_exchange
        self._x_priority = x_priority
        self._projects_path = projects_path
        self._send_log_as_artifact = True

        if queue_use_ssl:
            ssl_options = pika.SSLOptions(context=ssl.create_default_context())
        else:
            ssl_options = None

        self._connection = _rabbitmq_connect(
            node_id,
            worker_name,
            queue_host,
            queue_port,
            queue_user,
            queue_password,
            ssl_options,
        )

        self._channel = self._connection.channel()
        # Limit unacked deliveries per consumer so slow jobs don't hoard work.
        self._channel.basic_qos(prefetch_count=queue_prefetch_count, global_qos=False)
        self._channel.exchange_declare(
            exchange=queue_exchange, exchange_type="topic", durable=True
        )

        # Stream log records to the broker; routing key encodes
        # "<jobid>.<worker>.<type>.<level>" so listeners can filter.
        rabbit_handler = RabbitMQHandler(
            host=queue_host,
            port=queue_port,
            username=queue_user,
            password=queue_password,
            connection_params={"ssl_options": ssl_options},
            exchange="amq.topic",
            declare_exchange=True,
            routing_key_formatter=lambda r: (
                "{jobid}.{worker_name}.{type}.{level}".format(
                    jobid=r.id,
                    worker_name=worker_name,
                    type="python",
                    level=r.levelname.lower(),
                )
            ),
        )

        rabbit_handler.addFilter(HostnameFilter())
        rabbit_handler.addFilter(DefaultIdLogFilter())
        rabbit_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s - %(levelname)s - %(message)s", datefmt="%H:%M:%S"
            )
        )

        logger.addHandler(rabbit_handler)

    def bind(self, queue, routing_key, func):
        """Declare `queue`, bind it with `routing_key`, and register `func`.

        Args:
            queue: Queue name (declared durable, non-exclusive).
            routing_key: Topic routing key to bind with.
            func: Callback invoked with the raw message body; must return a
                (next_routing_key, new_body) pair (see _do_threaded_callback).
                Pass None to only declare and bind without consuming.
        """
        ch = self._channel

        ch.queue_declare(
            queue=queue,
            durable=True,
            exclusive=False,
        )
        ch.queue_bind(exchange=self._exchange, queue=queue, routing_key=routing_key)

        # If func was provided, register the callback
        if func is not None:
            ch.basic_consume(
                queue=queue,
                on_message_callback=functools.partial(
                    self._threaded_callback,
                    args=(func, self._connection, ch, self._threads),
                ),
                arguments={"x-priority": self._x_priority},
            )

        logger.info(f"Declare::Bind, Q::RK, {queue}::{routing_key}")

    def start(self):
        """Consume until KeyboardInterrupt, then join threads and close."""
        try:
            logger.info("Starting to consume messages")
            self._channel.start_consuming()
        except KeyboardInterrupt:
            logger.info("Stopping consuming ...")
            self._channel.stop_consuming()
            logger.info("Stopped consuming messages")

            # Wait for all to complete
            for thread in self._threads:
                thread.join()

            # Close connection
            self._connection.close()

    def queue_message(self, routing_key, body):
        """Publish `body` to this worker's exchange under `routing_key`."""
        _rabbitmq_queue_message(self._channel, self._exchange, routing_key, body)

    def _threaded_callback(self, ch, method_frame, _header_frame, body, args):
        """pika on-message callback: hand the delivery to a worker thread."""
        # NOTE: `ch` from `args` (the channel captured at bind time) shadows
        # the `ch` parameter delivered by pika; they are expected to be the
        # same channel.
        (func, conn, ch, thrds) = args
        delivery_tag = method_frame.delivery_tag
        t = threading.Thread(
            target=self._do_threaded_callback,
            args=(conn, ch, delivery_tag, func, body),
        )
        t.start()
        thrds.append(t)
        logger.info(
            "Thread count: %i of which %i active", len(thrds), threading.active_count()
        )

    def _do_threaded_callback(self, conn, ch, delivery_tag, func, body):
        """Run `func(body)` on this thread, then ack and optionally forward.

        The message body must be JSON with an "id" field (the job id) and may
        carry an "apikey".  When an API root and key are available, the job's
        log file is captured and uploaded as a job artifact afterwards.
        """
        thread_id = threading.get_ident()
        payload = json.loads(body.decode())
        # Make the job id visible to DefaultIdLogFilter for this thread.
        tld.job_id = payload["id"]

        api_root = os.getenv("API_ROOT_URL")
        api_key = payload.get("apikey", None)

        can_send_log_as_artifact = self._send_log_as_artifact and api_root and api_key

        job_log_directory = f"{self._projects_path}/jobs/{tld.job_id}"
        job_log_filename = f"{job_log_directory}/{self._worker_name}.log"

        if can_send_log_as_artifact:

            # Ensure the job directory exists
            Path(job_log_directory).mkdir(parents=True, exist_ok=True)

            # Set up the log file handler for this job
            file_handler = logging.FileHandler(filename=job_log_filename, mode="a")
            file_handler.addFilter(HostnameFilter())
            file_handler.addFilter(DefaultIdLogFilter())
            file_handler.setFormatter(
                logging.Formatter(
                    "%(asctime)s - %(id)s - %(levelname)s - %(hostname)s - %(filename)s->%(funcName)s() - %(message)s"
                )
            )

            logger.addHandler(file_handler)

        logger.info(
            "Thread id: %s Delivery tag: %s Message body: %s Job id: %s",
            thread_id,
            delivery_tag,
            body,
            tld.job_id,
        )

        # The callback decides what happens next: a routing key to forward
        # to (or None to stop) and an optional replacement body.
        next_routing_key, new_body = func(body)
        if new_body is not None:
            body = new_body
        if next_routing_key is not None:
            logger.info(f"next routing key: {next_routing_key}")
            cbq = functools.partial(self.queue_message, next_routing_key, body)
            conn.add_callback_threadsafe(cbq)

        # Ack on the connection's own thread (pika channels are not
        # thread-safe).
        cb = functools.partial(_rabbitmq_ack_message, ch, delivery_tag)
        conn.add_callback_threadsafe(cb)

        if can_send_log_as_artifact:
            logger.removeHandler(file_handler)
            try:
                logger.info("Creating artifact from job log")
                api = Api(root_url=api_root, api_key=api_key, node_id=self._node_id)
                api.create_job_artifact_from_file(
                    tld.job_id, f"{self._worker_name}_log", job_log_filename
                )
            except Exception as e:
                logger.error(f"Failed to create artifact from job log: {e}")
314
+
315
+
316
def _rabbitmq_connect(node_id, worker_name, host, port, user, password, ssl_options):
    """Connect to RabbitMQ with exponential backoff.

    Retries on connection-level failures (AMQP connection errors and DNS
    lookup failures), doubling the wait each attempt until
    RABBIT_MAX_WAIT_BEFORE_RERTY_SECS is reached, at which point the last
    error is re-raised.  Any other exception propagates immediately.

    Returns:
        pika.BlockingConnection: an open connection.
    """
    client_properties = {
        "connection_name": f"{node_id}-{worker_name}-{platform.node()}"
    }

    connection_params = pika.ConnectionParameters(
        host=host,
        port=port,
        credentials=pika.PlainCredentials(user, password),
        client_properties=client_properties,
        heartbeat=10,
        ssl_options=ssl_options,
    )

    sleep_time = RABBIT_FIRST_WAIT_BEFORE_RERTY_SECS

    while True:
        try:
            logger.info("Trying to connect to the rabbitmq server")
            return pika.BlockingConnection(connection_params)
        # The two retryable failure modes were previously handled by two
        # byte-for-byte duplicated blocks; they are merged here.
        except (pika.exceptions.AMQPConnectionError, gaierror) as err:
            sleep_time *= 2
            if sleep_time >= RABBIT_MAX_WAIT_BEFORE_RERTY_SECS:
                logger.error(f"Failed to connect to the rabbitmq after {sleep_time} s")
                raise
            logger.warning(
                f"Failed to connect to the rabbitmq, [{err}] retry in {sleep_time} s"
            )
            time.sleep(sleep_time)
        except Exception as err:
            logger.error(f"Failed to connect to the rabbitmq: {err}")
            raise
366
+
367
+
368
def _rabbitmq_ack_message(ch, delivery_tag):
    """Note that `ch` must be the same pika channel instance via which
    the message being ACKed was retrieved (AMQP protocol constraint).
    """
    if not ch.is_open:
        # Channel is already closed, so we can't ACK this message;
        # log and/or do something that makes sense for your app in this case.
        logger.error("Channel is closed, cannot ack message")
        return
    logger.info("Acknowledging message %s", delivery_tag)
    ch.basic_ack(delivery_tag)
379
+
380
+
381
def _rabbitmq_queue_message(ch, exchange, routing_key, body):
    """Publish `body` as a persistent message, or log if the channel is closed."""
    if not ch.is_open:
        logger.error("Channel is closed, cannot queue message")
        return
    logger.info(f"Sending {body} to {routing_key}")
    persistent = pika.BasicProperties(delivery_mode=2)  # make message persistent
    ch.basic_publish(
        exchange=exchange,
        routing_key=routing_key,
        body=body,
        properties=persistent,
    )
@@ -0,0 +1,42 @@
1
+ [project.urls]
2
+ Homepage = "https://github.com/TinArmEngineering/ltc_client/"
3
+ Issues = "https://github.com/TinArmEngineering/ltc_client/issues"
4
+
5
+ [tool.poetry]
6
+ name = "ltc-client"
7
+ version = "0.2"
8
+ description = "\"Client and Worker module for Tin Arm Engineering LTC service\""
9
+ authors = ["Martin West <martin@tinarmengineering.com>"]
10
+ license = "MIT"
11
+ readme = "README.md"
12
+ classifiers = [
13
+ "Programming Language :: Python :: 3",
14
+ "License :: OSI Approved :: MIT License",
15
+ "Operating System :: OS Independent",
16
+ ]
17
+ repository = "https://github.com/TinArmEngineering/ltc_client/"
18
+
19
+
20
+ [tool.poetry.dependencies]
21
+ python = "^3.10"
22
+ pika = "^1.3.2"
23
+ python-logging-rabbitmq = "^2.3.0"
24
+ requests = "^2.32.3"
25
+ Pint = "^0.24"
26
+ numpy = "^2.0.0"
27
+ webstompy = "^0.1.3"
28
+ tqdm = "^4.66.4"
29
+
30
+ [tool.poetry.group.dev.dependencies]
31
+ black = "^24.4.2"
32
+ pytest = "^8.2.2"
33
+
34
+ [tool.poetry.group.test.dependencies]
35
+ mock = "^5.1.0"
36
+ teamcity-messages = "^1.32"
37
+ pytest = "^8.2.2"
38
+ coverage = "^7.5.3"
39
+
40
+ [build-system]
41
+ requires = ["poetry-core"]
42
+ build-backend = "poetry.core.masonry.api"