IGJSP 1.0.1__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
IGJSP/generador.py CHANGED
@@ -26,6 +26,42 @@ def t(c):
26
26
  return 4.0704 * np.log(2) / np.log(1 + (c* 2.5093)**3)
27
27
 
28
28
 
29
+ # ------- Helpers internos para el DZN -------
30
+
31
+ def _parse_int_var(text, name, default=None):
32
+ """
33
+ Busca una variable escalar tipo: name = 10;
34
+ """
35
+ m = re.search(rf'\b{name}\b\s*=\s*([0-9]+)', text)
36
+ if m:
37
+ return int(m.group(1))
38
+ if default is not None:
39
+ return default
40
+ raise ValueError(f"No se encontró la variable entera '{name}' en el fichero DZN.")
41
+
42
+
43
+ def _parse_array_var(text, name):
44
+ """
45
+ Busca una variable array tipo:
46
+ name = [1, 2, 3];
47
+ o
48
+ name = [1 2 3];
49
+ Devuelve un np.array de ints o None si no está.
50
+ """
51
+ m = re.search(rf'\b{name}\b\s*=.*?\[(.*?)\];', text, re.DOTALL)
52
+ if not m:
53
+ return None
54
+
55
+ inner = m.group(1).strip()
56
+ if not inner:
57
+ return np.array([], dtype=int)
58
+
59
+ # Separar por comas o espacios
60
+ tokens = re.split(r'[\s,]+', inner)
61
+ tokens = [t for t in tokens if t != '']
62
+
63
+ return np.array([int(t) for t in tokens], dtype=int)
64
+
29
65
  #################################################################################
30
66
  # #
31
67
  # JSP #
@@ -124,10 +160,325 @@ class JSP:
124
160
  else:
125
161
  return expon(loc=duration, scale=duration/2).rvs()
126
162
 
127
- def savePythonFile(self, path):
128
- os.makedirs(os.path.dirname(path), exist_ok=True)
129
- with open(path, 'wb') as f:
130
- pickle.dump(self, f)
163
+ def loadPythonFile(path):
164
+ """
165
+ Carga un fichero .pkl generado por savePythonFile y devuelve un JSP.
166
+ """
167
+ with open(path, 'rb') as f:
168
+ obj = pickle.load(f)
169
+
170
+ # Si ya es un JSP, lo devolvemos tal cual
171
+ if isinstance(obj, JSP):
172
+ return obj
173
+
174
+ # Si es un dict con la misma estructura que usamos en loadJsonFile, construimos el JSP
175
+ if isinstance(obj, dict) and all(
176
+ k in obj for k in ['jobs', 'machines', 'ProcessingTime', 'EnergyConsumption', 'ReleaseDateDueDate', 'Orden']
177
+ ):
178
+ return JSP(**obj)
179
+
180
+ raise TypeError(
181
+ f"El objeto cargado desde {path} no es un JSP ni un diccionario compatible para construir uno."
182
+ )
183
+
184
    def loadDznFile(path):
        """
        Load a .dzn file written by saveDznFile and return a JSP.

        Supports rddd = 0, 1, 2 (no dates, per-job dates, per-operation
        dates), detected from the presence and size of the releaseDate /
        dueDate arrays.

        NOTE(review): defined without self/@staticmethod — call it as
        JSP.loadDznFile(path); calling it on an instance would pass the
        instance as `path`.
        """
        with open(path, 'r', encoding='utf-8') as f:
            text = f.read()

        # Scalar header variables; Speed defaults to 1 when absent.
        numJobs = _parse_int_var(text, 'jobs')
        numMchs = _parse_int_var(text, 'machines')
        speed = _parse_int_var(text, 'Speed', default=1)

        # Flattened matrices as written by saveDznFile.
        time_flat = _parse_array_var(text, 'time')
        energy_flat = _parse_array_var(text, 'energy')
        prec_flat = _parse_array_var(text, 'precedence')

        if time_flat is None or energy_flat is None or prec_flat is None:
            raise ValueError("Faltan variables obligatorias (time, energy o precedence) en el fichero DZN.")

        # Size check: time/energy are jobs x machines x speed, flattened.
        expected_te = numJobs * numMchs * speed
        if time_flat.size != expected_te or energy_flat.size != expected_te:
            raise ValueError(
                f"Tamaños incompatibles en time/energy: esperado {expected_te}, "
                f"time={time_flat.size}, energy={energy_flat.size}"
            )

        # Size check: precedence is jobs x machines, flattened.
        expected_prec = numJobs * numMchs
        if prec_flat.size != expected_prec:
            raise ValueError(
                f"Tamaño incompatible en precedence: esperado {expected_prec}, precedence={prec_flat.size}"
            )

        ProcessingTime = time_flat.reshape((numJobs, numMchs, speed))
        EnergyConsumption = energy_flat.reshape((numJobs, numMchs, speed))
        precedence = prec_flat.reshape((numJobs, numMchs))

        # Rebuild Orden from the precedence matrix (each row holds the
        # position 0..numMchs-1 of every machine in the job's sequence).
        Orden = np.zeros((numJobs, numMchs), dtype=int)
        for j in range(numJobs):
            # precedence[j, m] = position of machine m in the sequence;
            # argsort yields the machine index in visiting order.
            Orden[j, :] = np.argsort(precedence[j, :])

        # Release / due dates (may be absent → rddd = 0).
        release_flat = _parse_array_var(text, 'releaseDate')
        due_flat = _parse_array_var(text, 'dueDate')

        if release_flat is None or due_flat is None:
            # rddd = 0: empty array signals "no dates" downstream.
            ReleaseDueDate = np.array([])
        else:
            # Either a per-job vector, or a jobs x machines matrix.
            if release_flat.size == numJobs and due_flat.size == numJobs:
                # rddd = 1 → shape (numJobs, 2): [release, due] per job.
                ReleaseDueDate = np.zeros((numJobs, 2), dtype=int)
                ReleaseDueDate[:, 0] = release_flat
                ReleaseDueDate[:, 1] = due_flat
            elif release_flat.size == numJobs * numMchs and due_flat.size == numJobs * numMchs:
                # rddd = 2 → shape (numJobs, numMchs, 2): per operation.
                ReleaseDueDate = np.zeros((numJobs, numMchs, 2), dtype=int)
                ReleaseDueDate[:, :, 0] = release_flat.reshape((numJobs, numMchs))
                ReleaseDueDate[:, :, 1] = due_flat.reshape((numJobs, numMchs))
            else:
                raise ValueError(
                    "Los tamaños de releaseDate/dueDate no cuadran ni con rddd=1 ni con rddd=2."
                )

        # Same keyword set JSP.__init__ expects (mirrors loadJsonFile).
        sol = {
            'jobs': numJobs,
            'machines': numMchs,
            'ProcessingTime': ProcessingTime,
            'EnergyConsumption': EnergyConsumption,
            'ReleaseDateDueDate': ReleaseDueDate,
            'Orden': Orden
        }
        return JSP(**sol)
261
+
262
    def loadTaillardFile(path):
        """
        Load a text file written by saveTaillardStandardFile and return a JSP.

        Expected format::

            Number of jobs: J
            Number of machines: M

            Processing times:
            ... J rows, each with M integers ...

            Energy consumption:
            ... J rows, each with M integers ...

            Machine order:
            ... J rows, each with M integers ...

        The two value matrices are listed in *visit order* (position in the
        job's machine sequence); they are re-indexed by machine id at the
        end using the Machine order section.
        """
        with open(path, 'r') as f:
            lines = [line.strip() for line in f]

        # Read the header:
        #   Number of jobs: X
        #   Number of machines: Y
        numJobs = None
        numMchs = None

        i = 0
        while i < len(lines):
            line = lines[i]
            if line.startswith("Number of jobs"):
                numJobs = int(line.split(":")[1].strip())
            elif line.startswith("Number of machines"):
                numMchs = int(line.split(":")[1].strip())
            if numJobs is not None and numMchs is not None:
                i += 1
                break
            i += 1

        if numJobs is None or numMchs is None:
            raise ValueError("No se pudieron leer numJobs / numMchs del fichero Taillard.")

        # Skip blank lines up to "Processing times:"
        while i < len(lines) and lines[i] == "":
            i += 1
        if i >= len(lines) or not lines[i].startswith("Processing times"):
            raise ValueError("No se encontró la sección 'Processing times:' en el fichero Taillard.")
        i += 1  # step over the section header line

        # Processing-time matrix (J rows), indexed by position in sequence.
        proc_by_order = np.zeros((numJobs, numMchs), dtype=int)
        for j in range(numJobs):
            while i < len(lines) and lines[i] == "":
                i += 1
            # NOTE(review): a truncated file makes lines[i] raise IndexError
            # here instead of a descriptive ValueError — confirm acceptable.
            parts = lines[i].split()
            if len(parts) != numMchs:
                raise ValueError(
                    f"Línea de tiempos de procesamiento para job {j} tiene {len(parts)} elementos, "
                    f"pero se esperaban {numMchs}."
                )
            proc_by_order[j, :] = [int(x) for x in parts]
            i += 1

        # Skip ahead to "Energy consumption:"
        while i < len(lines) and lines[i] == "":
            i += 1
        if i >= len(lines) or not lines[i].startswith("Energy consumption"):
            raise ValueError("No se encontró la sección 'Energy consumption:' en el fichero Taillard.")
        i += 1  # section header

        # Energy matrix (J rows), also indexed by position in sequence.
        energy_by_order = np.zeros((numJobs, numMchs), dtype=int)
        for j in range(numJobs):
            while i < len(lines) and lines[i] == "":
                i += 1
            parts = lines[i].split()
            if len(parts) != numMchs:
                raise ValueError(
                    f"Línea de consumo de energía para job {j} tiene {len(parts)} elementos, "
                    f"pero se esperaban {numMchs}."
                )
            energy_by_order[j, :] = [int(x) for x in parts]
            i += 1

        # Skip ahead to "Machine order:"
        while i < len(lines) and lines[i] == "":
            i += 1
        if i >= len(lines) or not lines[i].startswith("Machine order"):
            raise ValueError("No se encontró la sección 'Machine order:' en el fichero Taillard.")
        i += 1  # section header

        # Orden[j, pos] = machine id visited at position pos by job j.
        Orden = np.zeros((numJobs, numMchs), dtype=int)
        for j in range(numJobs):
            while i < len(lines) and lines[i] == "":
                i += 1
            parts = lines[i].split()
            if len(parts) != numMchs:
                raise ValueError(
                    f"Línea de orden de máquinas para job {j} tiene {len(parts)} elementos, "
                    f"pero se esperaban {numMchs}."
                )
            Orden[j, :] = [int(x) for x in parts]
            i += 1

        # Rebuild ProcessingTime / EnergyConsumption indexed by machine id,
        # with a single speed level (standard Taillard has no speed scaling).
        speed = 1
        ProcessingTime = np.zeros((numJobs, numMchs, speed), dtype=int)
        EnergyConsumption = np.zeros((numJobs, numMchs, speed), dtype=int)

        for j in range(numJobs):
            for pos in range(numMchs):
                machine = Orden[j, pos]
                ProcessingTime[j, machine, 0] = proc_by_order[j, pos]
                EnergyConsumption[j, machine, 0] = energy_by_order[j, pos]

        # Standard Taillard: no release/due dates → rddd = 0 (empty array).
        ReleaseDueDate = np.array([])

        # Same keyword set JSP.__init__ expects (mirrors loadJsonFile).
        sol = {
            'jobs': numJobs,
            'machines': numMchs,
            'ProcessingTime': ProcessingTime,
            'EnergyConsumption': EnergyConsumption,
            'ReleaseDateDueDate': ReleaseDueDate,
            'Orden': Orden
        }
        return JSP(**sol)
387
+
388
    def loadJsonFile(path):
        """
        Load a JSON instance file (the format written by saveJsonFile) and
        return a JSP.

        NOTE(review): numJobs/numMchs are computed as len(data["nbJobs"]) /
        len(data["nbMchs"]) — this assumes those JSON entries are sized
        collections rather than plain integers; verify against the
        saveJsonFile output schema.
        """
        with open(path, "r") as f:
            data = json.load(f)
        numJobs = len(data["nbJobs"])
        numMchs = len(data["nbMchs"])
        speed = data["speed"]

        # # Load KPIs (optional)
        # min_makespan = data.get("minMakespan", None)
        # min_energy = data.get("minEnergy", None)
        # max_min_makespan = data.get("maxMinMakespan", None)
        # max_min_energy = data.get("maxMinEnergy", None)

        # Prepare empty structures
        ProcessingTime = np.zeros((numJobs, numMchs, speed), dtype=int)
        EnergyConsumption = np.zeros((numJobs, numMchs, speed), dtype=int)
        Orden_list = [[] for _ in range(numJobs)]

        # Detect rddd mode by probing the first job only:
        # rddd = 0 → no release/due dates
        # rddd = 1 → job-level dates
        # rddd = 2 → operation-level dates
        rddd = 0
        if data["timeEnergy"]:
            if "release-date" in data["timeEnergy"][0]:
                rddd = 1
            for m in data["timeEnergy"][0]["operations"]:
                if "release-date" in data["timeEnergy"][0]["operations"][m]:
                    rddd = 2
                    break

        # Initialize ReleaseDueDate array according to rddd
        if rddd == 1:
            ReleaseDueDate = np.zeros((numJobs, 2), dtype=int)
        elif rddd == 2:
            ReleaseDueDate = np.zeros((numJobs, numMchs, 2), dtype=int)
        else:
            # No dates: empty array so __init__ detects rddd=0.
            ReleaseDueDate = np.array([])

        # -------------------------
        # Load jobs & operations
        # -------------------------
        for job_data in data["timeEnergy"]:
            job = int(job_data["jobId"])

            # Optional job-level release/due dates
            if rddd == 1:
                ReleaseDueDate[job, 0] = int(job_data["release-date"])
                ReleaseDueDate[job, 1] = int(job_data["due-date"])

            # Operations are keyed by machine id (as a string in JSON).
            for machine_str, op_data in job_data["operations"].items():
                machine = int(machine_str)
                Orden_list[job].append(machine)

                # Load speed-scaling arrays
                proc_times = [int(entry["procTime"]) for entry in op_data["speed-scaling"]]
                energies = [int(entry["energyCons"]) for entry in op_data["speed-scaling"]]

                # Force length == speed: when the JSON entry declares fewer
                # speed levels than `speed`, the remainder stays zero.
                proc_arr = np.zeros((speed,), dtype=int)
                en_arr = np.zeros((speed,), dtype=int)
                L = min(len(proc_times), speed)
                proc_arr[:L] = proc_times[:L]
                en_arr[:L] = energies[:L]

                ProcessingTime[job, machine, :] = proc_arr
                EnergyConsumption[job, machine, :] = en_arr

                if rddd == 2:
                    ReleaseDueDate[job, machine, 0] = int(op_data["release-date"])
                    ReleaseDueDate[job, machine, 1] = int(op_data["due-date"])

        # Convert Orden to an ndarray (shape = numJobs x numMchs).
        Orden = np.zeros((numJobs, numMchs), dtype=int)
        for j in range(numJobs):
            if len(Orden_list[j]) != numMchs:
                # If a job somehow lacks some machines, pad with -1
                # (alternative would be to raise; -1 is used here).
                row = Orden_list[j] + [-1] * (numMchs - len(Orden_list[j]))
            else:
                row = Orden_list[j]
            Orden[j, :] = np.array(row, dtype=int)

        # Same keyword set JSP.__init__ expects.
        sol = {
            'jobs': numJobs,
            'machines': numMchs,
            'ProcessingTime': ProcessingTime,
            'EnergyConsumption': EnergyConsumption,
            'ReleaseDateDueDate': ReleaseDueDate,  # <-- ahora es array, no int
            'Orden': Orden
        }
        return JSP(**sol)
131
482
 
132
483
  def saveJsonFile(self, path):
133
484
  self.JSP = {
@@ -150,13 +501,13 @@ class JSP:
150
501
  for machine in self.Orden[job]:
151
502
  machine = int(machine)
152
503
  new["operations"][machine] = {"speed-scaling" :
153
- [
504
+ [
154
505
  {"procTime" : int(proc),
155
- "energyCons" : int(energy)
506
+ "energyCons" : int(energy)
156
507
  }
157
508
  for proc, energy in zip(self.ProcessingTime[job, machine],self.EnergyConsumption[job, machine])
158
- ]
159
- }
509
+ ]
510
+ }
160
511
  if self.rddd == 2:
161
512
  new["operations"][machine]["release-date"] = int(self.ReleaseDueDate[job][machine][0])
162
513
  new["operations"][machine]["due-date"] = int(self.ReleaseDueDate[job][machine][1])
@@ -192,6 +543,11 @@ class JSP:
192
543
 
193
544
  f.write(json_str)
194
545
 
546
+ def savePythonFile(self, path):
547
+ os.makedirs(os.path.dirname(path), exist_ok=True)
548
+ with open(path, 'wb') as f:
549
+ pickle.dump(self, f)
550
+
195
551
  def saveDznFile(self, InputDir, OutputDir):
196
552
  indexProblema = OutputDir.split("/")[-2]
197
553
  OutputDir = "/".join(OutputDir.split("/")[:-2])
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: IGJSP
3
- Version: 1.0.1
3
+ Version: 1.1.1
4
4
  Summary: Instance generator for JSP
5
5
  Project-URL: Homepage, https://gps.blogs.upv.es/
6
6
  Author-email: GPS-UPV <gps@dsic.upv.es>
@@ -1,4 +1,4 @@
1
- IGJSP/generador.py,sha256=w9SxkVC3oj4inIH9gP_CKvOecMm84_TVS08Kbc12A6E,34349
1
+ IGJSP/generador.py,sha256=vpopW1e8zwZ5H69uvCSY28KJuP-XGNM6VQPKCTawiAs,48598
2
2
  IGJSP/main.py,sha256=Sia5Ss8O3HWBdshvPLJKUMaZIoQPHy6x8yzvojojPFo,2838
3
3
  IGJSP/Minizinc/Models/RD/JSP0.mzn,sha256=cfN_E3RQ6nBulGfaOOYTd-zAgA5SI6E2saDlYtKCflg,2282
4
4
  IGJSP/Minizinc/Models/RD/JSP1.mzn,sha256=5B8cyw2WyKR8yEL1fFd0TaCAVhjPoxEJRJDPPEjJGEk,2840
@@ -12,7 +12,7 @@ IGJSP/Minizinc/Types/RD/FJSP/type2.dzn,sha256=Wz1MnkSL5GUPsbh1eq0leoaQRImkNqQqkX
12
12
  IGJSP/Minizinc/Types/RD/JSP/type0.dzn,sha256=wNuPQkXBXPSpPaPz2WFhp4pGDgfSimtg4I93UfwC01Q,263
13
13
  IGJSP/Minizinc/Types/RD/JSP/type1.dzn,sha256=Xbt9StzCgEqqh_HS9tWGrTVtu-OEnf5Yq5Ty91AkzoM,333
14
14
  IGJSP/Minizinc/Types/RD/JSP/type2.dzn,sha256=L2nc7bPJEhyuaEwgw0ZCpC52CpVJILQU_WQdKn8GUZs,379
15
- igjsp-1.0.1.dist-info/METADATA,sha256=xBsUY3HSGQvowsRaCPdf1wmwU8N9N_c9QykR2Y7dZs4,10609
16
- igjsp-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
17
- igjsp-1.0.1.dist-info/licenses/LICENSE,sha256=f7RDRO-z_nMoooAya7NAb8sXtrHR6WnttYtyUc9fB-c,1116
18
- igjsp-1.0.1.dist-info/RECORD,,
15
+ igjsp-1.1.1.dist-info/METADATA,sha256=JtfzaTnLrHW2GMG__daue2UjaEjh9_0UwgDMFDjVCRI,10609
16
+ igjsp-1.1.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
17
+ igjsp-1.1.1.dist-info/licenses/LICENSE,sha256=f7RDRO-z_nMoooAya7NAb8sXtrHR6WnttYtyUc9fB-c,1116
18
+ igjsp-1.1.1.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: hatchling 1.27.0
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any