harnice-0.3.0-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Files changed (41)
  1. harnice/__init__.py +0 -0
  2. harnice/__main__.py +4 -0
  3. harnice/cli.py +234 -0
  4. harnice/fileio.py +295 -0
  5. harnice/gui/launcher.py +426 -0
  6. harnice/lists/channel_map.py +182 -0
  7. harnice/lists/circuits_list.py +302 -0
  8. harnice/lists/disconnect_map.py +237 -0
  9. harnice/lists/formboard_graph.py +63 -0
  10. harnice/lists/instances_list.py +280 -0
  11. harnice/lists/library_history.py +40 -0
  12. harnice/lists/manifest.py +93 -0
  13. harnice/lists/post_harness_instances_list.py +66 -0
  14. harnice/lists/rev_history.py +325 -0
  15. harnice/lists/signals_list.py +135 -0
  16. harnice/products/__init__.py +1 -0
  17. harnice/products/cable.py +152 -0
  18. harnice/products/chtype.py +80 -0
  19. harnice/products/device.py +844 -0
  20. harnice/products/disconnect.py +225 -0
  21. harnice/products/flagnote.py +139 -0
  22. harnice/products/harness.py +522 -0
  23. harnice/products/macro.py +10 -0
  24. harnice/products/part.py +640 -0
  25. harnice/products/system.py +125 -0
  26. harnice/products/tblock.py +270 -0
  27. harnice/state.py +57 -0
  28. harnice/utils/appearance.py +51 -0
  29. harnice/utils/circuit_utils.py +326 -0
  30. harnice/utils/feature_tree_utils.py +183 -0
  31. harnice/utils/formboard_utils.py +973 -0
  32. harnice/utils/library_utils.py +333 -0
  33. harnice/utils/note_utils.py +417 -0
  34. harnice/utils/svg_utils.py +819 -0
  35. harnice/utils/system_utils.py +563 -0
  36. harnice-0.3.0.dist-info/METADATA +32 -0
  37. harnice-0.3.0.dist-info/RECORD +41 -0
  38. harnice-0.3.0.dist-info/WHEEL +5 -0
  39. harnice-0.3.0.dist-info/entry_points.txt +3 -0
  40. harnice-0.3.0.dist-info/licenses/LICENSE +19 -0
  41. harnice-0.3.0.dist-info/top_level.txt +1 -0
harnice/lists/rev_history.py
@@ -0,0 +1,325 @@
+ import os
+ import csv
+ import ast
+ import importlib
+ from harnice import fileio, state
+
+ # === Global Columns Definition ===
+ COLUMNS = [
+     "product",  # documentation needed
+     "mfg",  # documentation needed
+     "pn",  # documentation needed
+     "desc",  # documentation needed
+     "rev",  # documentation needed
+     "status",  # documentation needed
+     "releaseticket",  # documentation needed
+     "library_repo",  # documentation needed
+     "library_subpath",  # documentation needed
+     "datestarted",  # documentation needed
+     "datemodified",  # documentation needed
+     "datereleased",  # documentation needed
+     "git_hash_of_harnice_src",  # documentation needed
+     "drawnby",  # documentation needed
+     "checkedby",  # documentation needed
+     "revisionupdates",  # documentation needed
+     "affectedinstances",  # documentation needed
+ ]
+
+
+ def overwrite(content_dict):
+     PROTECTED_KEYS = [
+         "product",
+         "mfg",
+         "pn",
+         "rev",
+         "releaseticket",
+         "library_repo",
+         "library_subpath",
+         "datestarted",
+     ]
+     # 1) Ensure no unknown keys
+     for key in content_dict:
+         if key not in COLUMNS:
+             raise KeyError(
+                 f"Harnice does not allow writing unknown key '{key}'. "
+                 f"Valid columns: {', '.join(COLUMNS)}"
+             )
+
+     # 2) Ensure none of the protected keys are being modified
+     for key in PROTECTED_KEYS:
+         if key in content_dict:
+             raise KeyError(
+                 f"Harnice does not allow overwriting '{key}' by script.\n"
+                 f"Please edit the revision history manually."
+             )
+
+     # 3) Load or create revision history
+     path = fileio.path("revision history")
+     if not os.path.exists(path):
+         new()  # Creates a blank rev history with header
+
+     rows = fileio.read_tsv("revision history")
+
+     # 4) Determine which revision we are updating from state
+     target_rev = str(state.rev).strip()
+     if not target_rev:
+         raise RuntimeError("state.rev is not set. Did verify_revision_structure() run?")
+
+     # 5) Update matching row
+     found = False
+     for row in rows:
+         if str(row.get("rev", "")).strip() == target_rev:
+             found = True
+             for key, value in content_dict.items():
+                 row[key] = value
+
+     if not found:
+         raise ValueError(f"No revision '{target_rev}' found in revision history.")
+
+     # 6) Write updated TSV
+     with open(path, "w", newline="", encoding="utf-8") as f:
+         writer = csv.DictWriter(f, fieldnames=COLUMNS, delimiter="\t")
+         writer.writeheader()
+         writer.writerows(rows)
+
+
+ def info(rev=None, path=None, field=None, all=False):
+     if path is None:
+         path = fileio.path("revision history")
+
+     if not os.path.exists(path):
+         raise FileNotFoundError(f"Revision history file not found at {path}")
+
+     if rev:
+         rev = str(rev)
+     else:
+         rev = state.partnumber("R")
+
+     with open(path, newline="", encoding="utf-8") as f:
+         reader = csv.DictReader(f, delimiter="\t")
+         rows = list(reader)
+
+     if all:
+         return rows
+
+     for row in rows:
+         if row.get("rev") == rev:
+
+             # ------------------------------------------------------
+             # Field requested
+             # ------------------------------------------------------
+             if field:
+                 val = row.get(field)
+
+                 if field == "affectedinstances":
+                     if not val or val.strip() == "":
+                         return []
+                     try:
+                         return ast.literal_eval(val)
+                     except Exception:
+                         # fallback: return empty list if malformed
+                         return []
+
+                 # other fields unchanged
+                 return val
+
+             # ------------------------------------------------------
+             # Entire row requested: parse affectedinstances only
+             # ------------------------------------------------------
+             full_row = dict(row)
+             ai = row.get("affectedinstances")
+             if ai and ai.strip() != "":
+                 try:
+                     full_row["affectedinstances"] = ast.literal_eval(ai)
+                 except Exception:
+                     full_row["affectedinstances"] = []
+             else:
+                 full_row["affectedinstances"] = []
+
+             return full_row
+
+     raise ValueError(f"Revision {rev} not found in revision history at {path}")
+
+
+ def initial_release_exists():
+     try:
+         for row in fileio.read_tsv("revision history"):
+             if str(row.get("revisionupdates", "")).strip() == "INITIAL RELEASE":
+                 return True
+         # only conclude there is no initial release after checking every row
+         return False
+     except NameError:
+         return False
+
+
+ def initial_release_desc():
+     for row in fileio.read_tsv("revision history"):
+         if row.get("revisionupdates") == "INITIAL RELEASE":
+             return row.get("desc")
+
+
+ def update_datemodified():
+     target_rev = state.partnumber("R")
+
+     # Read all rows
+     with open(fileio.path("revision history"), newline="", encoding="utf-8") as f_in:
+         reader = csv.DictReader(f_in, delimiter="\t")
+         rows = list(reader)
+
+     # Modify matching row(s)
+     for row in rows:
+         if row.get("rev", "").strip() == target_rev:
+             row["datemodified"] = fileio.today()
+             row["drawnby"] = fileio.drawnby()["name"]
+             row["git_hash_of_harnice_src"] = fileio.get_git_hash_of_harnice_src()
+
+     # Write back
+     with open(
+         fileio.path("revision history"), "w", newline="", encoding="utf-8"
+     ) as f_out:
+         writer = csv.DictWriter(f_out, fieldnames=COLUMNS, delimiter="\t")
+         writer.writeheader()
+         writer.writerows(rows)
+
+
+ def new():
+     from harnice.cli import select_product_type
+
+     global product
+     product = select_product_type()
+     with open(fileio.path("revision history"), "w", newline="", encoding="utf-8") as f:
+         writer = csv.DictWriter(f, fieldnames=COLUMNS, delimiter="\t")
+         writer.writeheader()
+
+
+ def append(next_rev=None):
+     from harnice import cli
+
+     global product
+
+     if not os.path.exists(fileio.path("revision history")):
+         new()
+     rows = fileio.read_tsv("revision history")
+     product_name = None
+     if rows:
+         for row in reversed(rows):
+             candidate = (row.get("product") or "").strip()
+             if candidate:
+                 product_name = candidate
+                 break
+     if not product_name:
+         product_name = globals().get("product")
+     if not product_name:
+         product_name = cli.select_product_type()
+     product = product_name
+
+     default_desc = ""
+     if product_name:
+         try:
+             product_module = importlib.import_module(f"harnice.products.{product_name}")
+         except ModuleNotFoundError:
+             product_module = None
+         else:
+             default_desc = getattr(product_module, "default_desc", "") or ""
+
+     desc = ""
+     if next_rev != 1:
+         # find the highest revision in the table
+         try:
+             highest_existing_rev = max(
+                 int(row.get("rev", 0)) for row in rows if row.get("rev")
+             )
+         except ValueError:
+             highest_existing_rev = None
+
+         if next_rev != highest_existing_rev:
+             for row in rows:
+                 if row.get("rev") and int(row["rev"]) == highest_existing_rev:
+                     desc = row.get("desc")
+                     if row.get("status") in [None, ""]:
+                         print(
+                             f"Your existing highest revision ({highest_existing_rev}) has no status. Do you want to obsolete it?"
+                         )
+                         obsolete_message = cli.prompt(
+                             "Type your message here, leave blank for 'OBSOLETE' message, or type 'n' to keep it blank.",
+                             default="OBSOLETE",
+                         )
+                         if obsolete_message == "n":
+                             obsolete_message = ""
+                         row["status"] = obsolete_message
+                     break
+
+     if desc in [None, ""]:
+         desc = cli.prompt(
+             f"Enter a description of this {product_name}",
+             default=default_desc,
+         )
+
+     revisionupdates = "INITIAL RELEASE"
+     if initial_release_exists():
+         revisionupdates = ""
+     revisionupdates = cli.prompt(
+         "Enter a description for this revision", default=revisionupdates
+     )
+     while not revisionupdates or not revisionupdates.strip():
+         print("Revision updates can't be blank!")
+         revisionupdates = cli.prompt(
+             "Enter a description for this revision", default=None
+         )
+
+     # add library_repo if the current path is found in library locations
+     library_repo = ""
+     library_subpath = ""
+     cwd = str(os.getcwd()).lower().strip("~")
+
+     for row in fileio.read_tsv("library locations", delimiter=","):
+         lib_local_path = str(row.get("local_path", "")).lower().strip("~")
+         if lib_local_path in cwd:
+             library_repo = row.get("repo_url")
+
+             # keep only the portion AFTER local_path
+             idx = cwd.find(lib_local_path)
+             remainder = cwd[idx + len(lib_local_path) :].lstrip("/")
+             parts = remainder.split("/")
+
+             # find the part number in the path
+             pn = str(state.partnumber("pn")).lower()
+             if pn in parts:
+                 pn_index = parts.index(pn)
+                 core_parts = parts[:pn_index]  # everything before pn
+             else:
+                 core_parts = parts
+
+             # build library_subpath, stripping out the first element (the product type)
+             if core_parts:
+                 library_subpath = (
+                     "/".join(core_parts[1:]) + "/" if len(core_parts) > 1 else ""
+                 )
+             else:
+                 library_subpath = ""
+
+             break
+
+     rows.append(
+         {
+             "product": product_name,
+             "pn": state.pn,
+             "rev": next_rev,
+             "desc": desc,
+             "status": "",
+             "library_repo": library_repo,
+             "library_subpath": library_subpath,
+             "datestarted": fileio.today(),
+             "datemodified": fileio.today(),
+             "revisionupdates": revisionupdates,
+         }
+     )
+
+     with open(fileio.path("revision history"), "w", newline="", encoding="utf-8") as f:
+         writer = csv.DictWriter(f, fieldnames=COLUMNS, delimiter="\t")
+         writer.writeheader()
+         writer.writerows(rows)
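For orientation, here is a minimal usage sketch of this module (not part of the diff). It assumes a harnice working directory where fileio.path("revision history") resolves to the product's revision-history TSV; the field values are hypothetical.

    from harnice.lists import rev_history

    # Create revision 1 (new() runs implicitly if no history file exists;
    # harnice.cli prompts for the product type, description, and notes).
    rev_history.append(next_rev=1)

    # Only unprotected columns may be written by script; protected keys
    # such as "pn" or "rev" raise KeyError from overwrite().
    rev_history.overwrite({"checkedby": "jdoe"})

    # Read one field back for the revision currently set in state.
    notes = rev_history.info(field="revisionupdates")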
harnice/lists/signals_list.py
@@ -0,0 +1,135 @@
+ import csv
+ import os
+ from harnice import fileio
+
+ list_type = None
+ COLUMNS = []
+
+ # Signals list column headers; set_list_type() selects which set is active.
+ DEVICE_COLUMNS = [
+     "channel_id",  # Unique identifier for the channel.
+     "signal",  # Name of the electrical function of that signal, as it pertains to its channel type definition, e.g. "positive"
+     "connector_name",  # Unique identifier for the connector that this signal and channel is a part of.
+     "cavity",  # Identifier of the pin, socket, stud, etc., that this signal is internally electrically routed to within its connector.
+     "connector_mpn",  # MPN of the connector in this device (NOT the mating connector).
+     "channel_type",  # The channel type of this signal. \n{% include-markdown "fragments/channel_type_reference.md" %}
+     "config_variable",  # Change header or add more headers as needed. Blank: row is true across all values of this field. Otherwise, row is only true when configuration matches the value of this field.
+ ]
+
+ DISCONNECT_COLUMNS = [
+     "channel_id",  # Unique identifier for the channel.
+     "signal",  # Name of the electrical function of that signal, as it pertains to its channel type definition, e.g. "positive"
+     "A_cavity",  # Identifier of the pin, socket, stud, etc., that this signal is internally electrically routed to within that side of the connector.\n??? question "Why are A and B different here?"\n    Sometimes it's possible to have connectors that have cavities that may mate electrically, but have different names. For example, suppose two connectors physically mate, but are made by different manufacturers. One manufacturer used lowercase (a, b, c) to reference the cavities but the other used uppercase (A, B, C), or numbers (1, 2, 3), or colors (red, green, blue), etc.
+     "B_cavity",  # Identifier of the pin, socket, stud, etc., that this signal is internally electrically routed to within that side of the connector.\n??? question "Why are A and B different here?"\n    Sometimes it's possible to have connectors that have cavities that may mate electrically, but have different names. For example, suppose two connectors physically mate, but are made by different manufacturers. One manufacturer used lowercase (a, b, c) to reference the cavities but the other used uppercase (A, B, C), or numbers (1, 2, 3), or colors (red, green, blue), etc.
+     "A_connector_mpn",  # MPN of the connector of the harness on this side of the disconnect
+     "A_channel_type",  # The channel type of this side of the disconnect.\n??? question "Why are A and B different here?"\n    It's important to keep track of which side has which channel type so that you cannot accidentally flip pins and sockets, for example, by mapping the wrong channel type to the wrong pin gender. Careful validation should be done when mapping channels through disconnects to ensure the disconnects have channels that pass through them in the correct direction.
+     "B_connector_mpn",  # MPN of the connector of the harness on this side of the disconnect
+     "B_channel_type",  # The channel type of this side of the disconnect.\n??? question "Why are A and B different here?"\n    It's important to keep track of which side has which channel type so that you cannot accidentally flip pins and sockets, for example, by mapping the wrong channel type to the wrong pin gender. Careful validation should be done when mapping channels through disconnects to ensure the disconnects have channels that pass through them in the correct direction.
+ ]
+
+
+ def set_list_type(x):
+     global list_type
+     list_type = x
+
+     global COLUMNS
+     if list_type == "device":
+         COLUMNS = DEVICE_COLUMNS
+     elif list_type == "disconnect":
+         COLUMNS = DISCONNECT_COLUMNS
+
+
+ def new():
+     """
+     Creates a new signals TSV file at fileio.path("signals list") with only the header row.
+     Overwrites any existing file.
+     """
+     signals_path = fileio.path("signals list")
+     os.makedirs(os.path.dirname(signals_path), exist_ok=True)
+
+     if os.path.exists(signals_path):
+         os.remove(signals_path)
+
+     with open(signals_path, "w", newline="", encoding="utf-8") as f:
+         writer = csv.writer(f, delimiter="\t")
+         writer.writerow(COLUMNS)
+
+
+ def append(**kwargs):
+     """
+     Appends a new row to the signals TSV file.
+     Missing optional fields will be written as empty strings.
+     Raises ValueError if required fields are missing.
+
+     Required kwargs:
+         For 'device':
+             channel_id, signal, connector_name, cavity, connector_mpn, channel_type
+         For 'disconnect':
+             channel_id, signal, A_cavity, A_connector_mpn, A_channel_type,
+             B_cavity, B_connector_mpn, B_channel_type
+     """
+     signals_path = fileio.path("signals list")
+
+     # Create the signals list file if it doesn't exist
+     if not os.path.exists(signals_path):
+         new()
+
+     # --- Define required fields based on product type ---
+     if list_type == "device":
+         required = [
+             "channel_id",
+             "signal",
+             "connector_name",
+             "cavity",
+             "connector_mpn",
+             "channel_type",
+         ]
+     elif list_type == "disconnect":
+         required = [
+             "channel_id",
+             "signal",
+             "A_cavity",
+             "A_connector_mpn",
+             "A_channel_type",
+             "B_cavity",
+             "B_connector_mpn",
+             "B_channel_type",
+         ]
+     else:
+         required = []
+
+     # --- Check for missing required fields ---
+     missing = [key for key in required if not kwargs.get(key)]
+     if missing:
+         raise ValueError(
+             f"Missing required signal fields for '{list_type}': {', '.join(missing)}"
+         )
+
+     # --- Fill row in header order ---
+     row = [kwargs.get(col, "") for col in COLUMNS]
+
+     # --- Append to the signals list ---
+     with open(signals_path, "a", newline="", encoding="utf-8") as f:
+         writer = csv.writer(f, delimiter="\t", lineterminator="\n")
+         writer.writerow(row)
+
+
+ def cavity_of_signal(channel_id, signal, path_to_signals_list):
+     for row in fileio.read_tsv(path_to_signals_list):
+         if row.get("signal", "").strip() == signal.strip():
+             if row.get("channel_id", "").strip() == channel_id.strip():
+                 return row.get("cavity", "").strip()
+     raise ValueError(
+         f"Signal {signal} of channel_id {channel_id} not found in {path_to_signals_list}"
+     )
+
+
+ def connector_name_of_channel(channel_id, path_to_signals_list):
+     if not os.path.exists(path_to_signals_list):
+         raise FileNotFoundError(f"Signals list file not found: {path_to_signals_list}")
+
+     with open(path_to_signals_list, newline="", encoding="utf-8") as f:
+         reader = csv.DictReader(f, delimiter="\t")
+         for row in reader:
+             if row.get("channel_id", "").strip() == channel_id.strip():
+                 return row.get("connector_name", "").strip()
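A short sketch of the intended calling sequence (not part of the diff): set_list_type() must run before new() or append(), since COLUMNS starts empty. The values below are hypothetical.

    from harnice.lists import signals_list

    signals_list.set_list_type("device")  # selects DEVICE_COLUMNS
    signals_list.new()                    # writes the header row

    # Omitting any required field raises ValueError; unknown kwargs are
    # silently dropped because the row is filled in COLUMNS order.
    signals_list.append(
        channel_id="CH1",
        signal="positive",
        connector_name="J1",
        cavity="1",
        connector_mpn="DT04-2P",
        channel_type="(1, 'https://example.com/lib.git')",
    )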
harnice/products/__init__.py
@@ -0,0 +1 @@
+ # Render package for harnice
harnice/products/cable.py
@@ -0,0 +1,152 @@
+ import os
+ import csv
+ import json
+ from harnice import fileio, state
+
+
+ default_desc = "CABLE, FUNCTION, ATTRIBUTES, etc."
+
+
+ def file_structure():
+     return {
+         f"{state.partnumber('pn-rev')}-attributes.json": "attributes",
+         f"{state.partnumber('pn-rev')}-conductor_list.tsv": "conductor list",
+     }
+
+
+ def generate_structure():
+     pass
+
+
+ def render():
+     # ========== Default JSON ==========
+     default_attributes = {
+         "jacket": {
+             "properties": {
+                 "color": "gray",
+                 "material": "pvc",
+                 "od": "0.204in",
+                 "thickness": "0.028in",
+             },
+             "shield": {
+                 "properties": {"type": "foil", "coverage": "100%"},
+                 "drain_wire": {
+                     "conductor": True,
+                     "properties": {"gauge": "20AWG", "construction": "7x28"},
+                     "appearance": {
+                         "outline_color": "gray",
+                         "slash_lines": {"direction": "RH", "color": "gray"},
+                     },
+                 },
+                 "pair_1": {
+                     "properties": {"twists": "12 per inch"},
+                     "black": {
+                         "conductor": True,
+                         "properties": {
+                             "insulation material": "polyethylene",
+                             "od": "0.017in",
+                             "gauge": "20AWG",
+                             "construction": "7x28",
+                             "material": "copper",
+                         },
+                         "appearance": {"base_color": "black"},
+                     },
+                     "white": {
+                         "conductor": True,
+                         "properties": {
+                             "insulation material": "polyethylene",
+                             "od": "0.017in",
+                             "gauge": "20AWG",
+                             "construction": "7x28",
+                             "material": "copper",
+                         },
+                         "appearance": {"base_color": "white", "outline_color": "black"},
+                     },
+                 },
+             },
+         }
+     }
+
+     attributes_path = fileio.path("attributes")
+
+     # ========== Load or create attributes.json ==========
+     if os.path.exists(attributes_path):
+         try:
+             with open(attributes_path, "r", encoding="utf-8") as f:
+                 attrs = json.load(f)
+         except Exception as e:
+             print(f"[WARNING] Could not load existing attributes.json: {e}")
+             attrs = default_attributes
+             with open(attributes_path, "w", encoding="utf-8") as f:
+                 json.dump(attrs, f, indent=4)
+     else:
+         attrs = default_attributes
+         with open(attributes_path, "w", encoding="utf-8") as f:
+             json.dump(attrs, f, indent=4)
+
+     # ========== Traverse and build TSV ==========
+     rows = []
+     all_headers = {"appearance"}  # ensure it exists
+
+     def recurse(obj, parent_chain=None):
+         """Walk through nested dicts, recording conductor rows."""
+         if parent_chain is None:
+             parent_chain = []
+
+         if isinstance(obj, dict):
+             # Found conductor
+             if obj.get("conductor") is True:
+                 props = obj.get("properties", {})
+                 appearance = obj.get("appearance", {})
+
+                 # Use previous parent chain to fill in context
+                 container = parent_chain[-2] if len(parent_chain) >= 2 else ""
+                 identifier = parent_chain[-1] if len(parent_chain) >= 1 else ""
+
+                 row = {"container": container, "identifier": identifier}
+
+                 for k, v in props.items():
+                     row[k] = v
+                     all_headers.add(k)
+
+                 # Convert appearance to JSON string (compact)
+                 row["appearance"] = (
+                     json.dumps(
+                         appearance, separators=(",", ":"), ensure_ascii=False
+                     ).replace('"', "'")
+                     if appearance
+                     else ""
+                 )
+
+                 rows.append(row)
+
+             # Continue traversing deeper
+             for k, v in obj.items():
+                 if isinstance(v, (dict, list)):
+                     recurse(v, parent_chain + [k])
+
+         elif isinstance(obj, list):
+             for item in obj:
+                 recurse(item, parent_chain)
+
+     recurse(attrs)
+
+     if not rows:
+         print("[WARNING] No conductor entries found.")
+         return
+
+     # Define header order
+     headers = ["container", "identifier"] + sorted(all_headers)
+
+     # Write to TSV
+     conductor_list_path = fileio.path("conductor list")
+     with open(conductor_list_path, "w", newline="", encoding="utf-8") as f:
+         writer = csv.DictWriter(
+             f, fieldnames=headers, delimiter="\t", lineterminator="\n"
+         )
+         writer.writeheader()
+         writer.writerows(rows)
+
+     print(
+         f"\ncable rendered successfully! wrote {len(rows)} rows to:\n{conductor_list_path}\n"
+     )
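To make the flattening concrete: with the default attributes above, recurse() finds three nodes marked conductor: True (the shield's drain wire and the two insulated conductors of pair_1), taking container and identifier from the last two keys of the parent chain. The resulting conductor list would look roughly like this illustrative excerpt, with property columns sorted alphabetically after the two context columns (not verbatim output):

    container   identifier   appearance                     construction   gauge   ...
    shield      drain_wire   {'outline_color':'gray',...}   7x28           20AWG
    pair_1      black        {'base_color':'black'}         7x28           20AWG
    pair_1      white        {'base_color':'white',...}     7x28           20AWG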
harnice/products/chtype.py
@@ -0,0 +1,80 @@
+ import os
+ import ast
+ from harnice import fileio
+ from harnice.utils import library_utils
+
+
+ def file_structure():
+     return {}
+
+
+ def generate_structure():
+     pass
+
+
+ def path(channel_type):
+     """
+     Args:
+         channel_type: tuple like (chid, lib_repo) or string like "(5, '...')"
+     """
+     chid, lib_repo = parse(channel_type)
+     base_dir = library_utils.get_local_path(lib_repo)
+     return os.path.join(base_dir, "channel_types", "channel_types.tsv")
+
+
+ def parse(val):
+     """Convert a stored string into a tuple (chid: int, lib_repo: str)."""
+     if not val:
+         return None
+     if isinstance(val, tuple):
+         chid, lib_repo = val
+     else:
+         chid, lib_repo = ast.literal_eval(str(val))
+     return (int(chid), str(lib_repo).strip())
+
+
+ def compatibles(channel_type):
+     """
+     Look up compatible channel_types for the given channel_type.
+     Splits the TSV field on semicolons and parses each entry into (chid, lib_repo).
+     """
+     channel_type_id, lib_repo = parse(channel_type)
+     for row in fileio.read_tsv(path((channel_type_id, lib_repo))):
+         if str(channel_type_id) != str(row.get("channel_type_id")):
+             continue
+
+         compat_str = row.get("compatible_channel_types", "").strip()
+         if not compat_str:
+             return []
+
+         values = [v.strip() for v in compat_str.split(";") if v.strip()]
+         parsed = []
+         for v in values:
+             parsed.append(parse(v))
+         return parsed
+
+     return []
+
+
+ def is_or_is_compatible_with(channel_type):
+     output = []
+     output.append(parse(channel_type))
+     for compatible in compatibles(channel_type):
+         output.append(compatible)
+     return output
+
+
+ # search channel_types.tsv
+ def signals(channel_type):
+     chid, lib_repo = parse(channel_type)
+
+     ch_types_tsv_path = os.path.join(
+         library_utils.get_local_path(lib_repo), "channel_types", "channel_types.tsv"
+     )
+
+     for row in fileio.read_tsv(ch_types_tsv_path):
+         if str(row.get("channel_type_id", "")).strip() == str(chid):
+             return [
+                 sig.strip() for sig in row.get("signals", "").split(",") if sig.strip()
+             ]
+     return []
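Finally, a hedged sketch of how these helpers compose (not part of the diff). The (chid, lib_repo) tuple is the module's canonical key; the repo URL below is hypothetical, and compatibles() only works where library_utils.get_local_path() can resolve the repo to a local checkout containing channel_types/channel_types.tsv.

    from harnice.products import chtype

    # parse() accepts either the tuple or its serialized TSV-cell form.
    ct = chtype.parse("(5, 'https://example.com/channel-lib.git')")

    # The channel type itself plus everything its library row declares
    # compatible, each returned as a (chid, lib_repo) tuple.
    for chid, repo in chtype.is_or_is_compatible_with(ct):
        print(chid, repo)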