gamsapi 52.4.0__cp314-cp314-win_amd64.whl → 53.0.0rc1__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. gams/_version.py +1 -1
  2. gams/connect/agents/_excel/excelagent.py +5 -3
  3. gams/connect/agents/_excel/workbook.py +15 -0
  4. gams/connect/agents/_sqlconnectors/_accesshandler.py +1 -0
  5. gams/connect/agents/_sqlconnectors/_databasehandler.py +17 -0
  6. gams/connect/agents/_sqlconnectors/_sqlitehandler.py +1 -0
  7. gams/connect/agents/connectagent.py +9 -2
  8. gams/connect/agents/csvreader.py +74 -16
  9. gams/connect/agents/excelreader.py +6 -2
  10. gams/connect/agents/schema/ExcelWriter.yaml +8 -2
  11. gams/connect/agents/sqlreader.py +5 -1
  12. gams/connect/agents/sqlwriter.py +2 -0
  13. gams/core/cfg/_cfgmcc.cp314-win_amd64.pyd +0 -0
  14. gams/core/dct/_dctmcc.cp314-win_amd64.pyd +0 -0
  15. gams/core/gdx/_gdxcc.cp314-win_amd64.pyd +0 -0
  16. gams/core/gev/_gevmcc.cp314-win_amd64.pyd +0 -0
  17. gams/core/gmd/_gmdcc.cp314-win_amd64.pyd +0 -0
  18. gams/core/gmo/_gmomcc.cp314-win_amd64.pyd +0 -0
  19. gams/core/idx/_idxcc.cp314-win_amd64.pyd +0 -0
  20. gams/core/numpy/_gams2numpy.cp314-win_amd64.pyd +0 -0
  21. gams/core/opt/_optcc.cp314-win_amd64.pyd +0 -0
  22. gams/tools/toolcollection/data/sqlitewrite.py +92 -99
  23. gams/transfer/_internals/algorithms.py +2 -2
  24. gams/transfer/containers/_container.py +109 -61
  25. gams/transfer/containers/_io/gdx.py +3 -2
  26. gams/transfer/containers/_io/gmd.py +5 -4
  27. gams/transfer/containers/_mixins/ccc.py +276 -271
  28. gams/transfer/syms/_mixins/pivot.py +3 -3
  29. gams/transfer/syms/_mixins/ve.py +4 -1
  30. gams/transfer/syms/container_syms/_parameter.py +2 -0
  31. {gamsapi-52.4.0.dist-info → gamsapi-53.0.0rc1.dist-info}/METADATA +17 -17
  32. {gamsapi-52.4.0.dist-info → gamsapi-53.0.0rc1.dist-info}/RECORD +35 -35
  33. {gamsapi-52.4.0.dist-info → gamsapi-53.0.0rc1.dist-info}/WHEEL +1 -1
  34. {gamsapi-52.4.0.dist-info → gamsapi-53.0.0rc1.dist-info}/licenses/LICENSE +0 -0
  35. {gamsapi-52.4.0.dist-info → gamsapi-53.0.0rc1.dist-info}/top_level.txt +0 -0
gams/_version.py CHANGED
@@ -1 +1 @@
- __version__ = "52.4.0"
+ __version__ = "53.0.0rc1"
gams/connect/agents/_excel/excelagent.py CHANGED
@@ -186,8 +186,8 @@ class ExcelAgent(ConnectAgent):
      def parse_range(self, sym_range, wb, clear_sheet=False, create_missing=False):
          sheet, rng = self._split_range(sym_range, wb)
          toc_range = f"'{sheet}'!{rng.split(':')[0]}"
-         sheet = self.sheet_by_name(sheet, wb, clear_sheet, create_missing)
          nw_col, nw_row, se_col, se_row = self._range_to_coords(rng)
+         sheet = self.sheet_by_name(sheet, wb, clear_sheet, create_missing, nw_col, nw_row, se_col, se_row)
          return sheet, nw_col, nw_row, se_col, se_row, toc_range

      def parse_index(self, index, wb, index_parameter_map):
@@ -240,12 +240,14 @@ class ExcelAgent(ConnectAgent):
          symbols.append(sym)
          return symbols

-     def sheet_by_name(self, sheet, wb, clear_sheet=False, create_missing=False):
+     def sheet_by_name(self, sheet, wb, clear_sheet=False, create_missing=False, nw_col=None, nw_row=None, se_col=None, se_row=None):
          for idx, s in enumerate(wb.sheetnames):
              if sheet.lower() == s.lower():
-                 if clear_sheet:
+                 if clear_sheet is True:
                      wb.delete_sheet(s)
                      return wb.create_sheet(s, idx)
+                 if clear_sheet == "range":
+                     wb.delete_range(s, nw_col, nw_row, se_col, se_row)
                  return wb.get_sheet(s)
          if create_missing:
              return wb.create_sheet(sheet)
gams/connect/agents/_excel/workbook.py CHANGED
@@ -111,6 +111,21 @@ class Workbook:
          else:
              raise Exception("Invalid engine.")

+     def delete_range(self, sheet_name, nw_col, nw_row, se_col, se_row):
+         if self._engine == "xlwings":
+             raise Exception("Not imlemented.")
+         elif self._engine == "openpyxl":
+             sheet = self._wb[sheet_name]
+             if se_row is None:
+                 se_row = sheet.max_row
+             if se_col is None:
+                 se_col = sheet.max_column
+             for row in range(nw_row+1, se_row + 1):
+                 for col in range(nw_col+1, se_col + 1):
+                     sheet.cell(row=row, column=col).value = None
+         else:
+             raise Exception("Invalid engine.")
+
      def delete_sheet(self, sheet_name):
          if self._engine == "xlwings":
              self._wb.sheets[sheet_name].delete()
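
Taken together, the two hunks above implement the new clearSheet value "range": instead of deleting and recreating a whole sheet, only the rectangle a symbol is about to occupy is blanked. A minimal standalone sketch of the openpyxl path, assuming a hypothetical workbook book.xlsx with a sheet named data; coordinates here are 1-based and inclusive, whereas the package's _range_to_coords uses its own offset convention:

from openpyxl import load_workbook

def clear_range(path, sheet_name, nw_col, nw_row, se_col, se_row):
    """Blank a rectangle of cells, mirroring Workbook.delete_range."""
    wb = load_workbook(path)
    sheet = wb[sheet_name]
    # open-ended ranges such as "A1:C" fall back to the sheet's used extent
    if se_row is None:
        se_row = sheet.max_row
    if se_col is None:
        se_col = sheet.max_column
    for row in range(nw_row, se_row + 1):
        for col in range(nw_col, se_col + 1):
            # clearing the value keeps cell formatting intact
            sheet.cell(row=row, column=col).value = None
    wb.save(path)

clear_range("book.xlsx", "data", nw_col=1, nw_row=1, se_col=3, se_row=10)
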
gams/connect/agents/_sqlconnectors/_accesshandler.py CHANGED
@@ -30,6 +30,7 @@ from gams.connect.agents._sqlconnectors._databasehandler import DatabaseConnector
  class AccessConnector(DatabaseConnector):
      SUPPORTED_INSERT_METHODS = ["default", "bulkInsert"]
      QUOTE_CHAR = ["[]", '""', "``"]
+     SCHEMA_SUPPORT = False

      @staticmethod
      def _create_accdb(dbpath):
gams/connect/agents/_sqlconnectors/_databasehandler.py CHANGED
@@ -45,10 +45,20 @@ class ConnectionType(Enum):
      SQLSERVER = "sqlserver"
      ACCESS = "access"

+ CONNECTOR = {
+     "SQLiteConnector": ConnectionType.SQLITE.value,
+     "SQLAlchemyConnector": ConnectionType.SQLALCHEMY.value,
+     "PyodbcConnector": ConnectionType.PYODBC.value,
+     "MySQLConnector": ConnectionType.MYSQL.value,
+     "PostgresConnector": ConnectionType.POSTGRES.value,
+     "SQLServerConnector": ConnectionType.SQLSERVER.value,
+     "AccessConnector": ConnectionType.ACCESS.value,
+ }

  class DatabaseConnector(ABC):
      SUPPORTED_INSERT_METHODS = []
      QUOTE_CHAR = []
+     SCHEMA_SUPPORT = True

      def __init__(
          self,
@@ -70,6 +80,13 @@ class DatabaseConnector(ABC):
              f"Valid methods are >{self.SUPPORTED_INSERT_METHODS}<"
          )

+     def is_schema_available(self, schema: str | None):
+         """Checks if the connectionType allows schema"""
+         if (not self.SCHEMA_SUPPORT) and (schema is not None):
+             self._raise_error(
+                 f"connectionType >{CONNECTOR[type(self).__name__]}< does not support schema but schemaName is set to >{schema}<."
+             )
+
      def read_table(self, sql_query: str, read_sql_args: dict) -> pd.DataFrame:
          """
          Read data from select DBMS using the provided SQL Query. Returns a pandas.DataFrame
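
The guard pairs a class-level capability flag with the new CONNECTOR lookup so the error names the user-facing connectionType rather than the internal class. A minimal sketch of the same pattern outside the package; class names and the ValueError are illustrative, not the package's API:

# class-level capability flag plus a name lookup for error messages
CONNECTOR = {"SQLiteConnector": "sqlite", "PostgresConnector": "postgres"}

class SQLiteConnector:
    SCHEMA_SUPPORT = False  # SQLite has no schema namespaces in this sense

    def is_schema_available(self, schema):
        if not self.SCHEMA_SUPPORT and schema is not None:
            raise ValueError(
                f"connectionType >{CONNECTOR[type(self).__name__]}< does not "
                f"support schema but schemaName is set to >{schema}<."
            )

SQLiteConnector().is_schema_available(None)    # passes silently
SQLiteConnector().is_schema_available("main")  # raises ValueError
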
gams/connect/agents/_sqlconnectors/_sqlitehandler.py CHANGED
@@ -31,6 +31,7 @@ from gams.connect.agents._sqlconnectors._databasehandler import DatabaseConnector
  class SQLiteConnector(DatabaseConnector):
      SUPPORTED_INSERT_METHODS = ["default"]
      QUOTE_CHAR = ["[]", '""', "``"]
+     SCHEMA_SUPPORT = False

      def connect(self, connection_details, connection_args, **kwargs) -> None:

gams/connect/agents/connectagent.py CHANGED
@@ -381,14 +381,21 @@ class ConnectAgent(ABC):
              # pandas-version-check
              if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
                  df.iloc[:, -1] = df.iloc[:, -1].replace(vs)
-             else: # pandas >= 2.2.0
+             elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
                  with pd.option_context("future.no_silent_downcasting", True):
                      df.iloc[:, -1] = df.iloc[:, -1].replace(vs).infer_objects()
+             else: # pandas >= 3.0.0
+                 df.iloc[:, -1] = df.iloc[:, -1].replace(vs).infer_objects()
+
          else:
              # pandas-version-check
              if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
                  df.iloc[~mask, -1] = df.iloc[~mask, -1].replace(vs)
-             else: # pandas >= 2.2.0
+             elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
+                 df.iloc[~mask, -1] = (
+                     df.iloc[~mask, -1].replace(vs).infer_objects()
+                 )
+             else: # pandas >= 3.0.0
                  with pd.option_context("future.no_silent_downcasting", True):
                      df.iloc[~mask, -1] = (
                          df.iloc[~mask, -1].replace(vs).infer_objects()
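
The same three-way version gate recurs in CSVReader, ExcelReader, and SQLReader below: pandas < 2.2 uses the legacy replace() semantics, 2.2.x opts into future.no_silent_downcasting to silence the downcasting deprecation, and 3.x calls replace() plus infer_objects() directly because no silent downcasting is the default there. A minimal sketch of the gate; the _pandas_version_before helper is reconstructed here for illustration and is not the package's implementation:

import pandas as pd

def _pandas_version_before(version: str, ref: str) -> bool:
    """Compare the leading major.minor components of dotted version strings."""
    def key(v):
        return tuple(int(p) for p in v.split(".")[:2])
    return key(version) < key(ref)

df = pd.DataFrame({"value": ["eps", 1.0]})
vs = {"eps": 4.94066e-324}  # illustrative value substitutions

if _pandas_version_before(pd.__version__, "2.2"):
    # old pandas: replace() still downcasts silently
    df.iloc[:, -1] = df.iloc[:, -1].replace(vs)
elif _pandas_version_before(pd.__version__, "3.0"):
    # 2.2.x: opt in to the future behavior to avoid the deprecation warning
    with pd.option_context("future.no_silent_downcasting", True):
        df.iloc[:, -1] = df.iloc[:, -1].replace(vs).infer_objects()
else:
    # 3.x: no silent downcasting is the default, so no option context is needed
    df.iloc[:, -1] = df.iloc[:, -1].replace(vs).infer_objects()
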
gams/connect/agents/csvreader.py CHANGED
@@ -325,9 +325,11 @@ class CSVReader(ConnectAgent):
          # pandas-version-check
          if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
              df.isetitem(-1, df.iloc[:, -1].replace(self._value_sub))
-         else: # pandas >= 2.2.0
+         elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
              with pd.option_context("future.no_silent_downcasting", True):
                  df.isetitem(-1, df.iloc[:, -1].replace(self._value_sub).infer_objects())
+         else: # pandas >= 3.0.0
+             df.isetitem(-1, df.iloc[:, -1].replace(self._value_sub).infer_objects())

          if self._trace > 2:
              self._cdb.print_log(f"DataFrame after value substitution:\n{df}")
@@ -426,9 +428,10 @@
          # pandas-version-check
          if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
              df = df.stack(level=df.columns.names, dropna=False)
-         else: # pandas >= 2.2.0
+         elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
              df = df.stack(level=df.columns.names, future_stack=True)
-
+         else: # pandas >= 3.0.0
+             df = df.stack(level=df.columns.names)
          return df

      def _sort_value_columns(self, df: pd.DataFrame) -> pd.DataFrame:
@@ -495,6 +498,36 @@
          )

          return df
+
+     def _create_sym(self, df, domain):
+         """Creates symbol (parameter or set) in the Connect container
+
+         Parameters
+         ----------
+         df : pd.DataFrame | None
+             Symbol records
+         domain : list
+             Symbol domains
+
+         Returns
+         -------
+         Symbol object
+             Created symbol
+         """
+         if self._sym_type == "par":
+             sym = self._cdb.container.addParameter(
+                 self._name,
+                 domain,
+                 records=df,
+             )
+         else:
+             sym = self._cdb.container.addSet(
+                 self._name,
+                 domain,
+                 records=df,
+             )
+
+         return sym

      def execute(self):
          if self._trace > 0:
@@ -543,7 +576,37 @@
              f"Arguments for reading the CSV file:\n{self._read_csv_args}"
          )

-         df = pd.read_csv(self._file, **self._read_csv_args)
+         try:
+             df = pd.read_csv(self._file, **self._read_csv_args)
+
+         except pd.errors.EmptyDataError:
+             if self._trace > 1:
+                 self._cdb.print_log(
+                     "Empty data after reading CSV file."
+                 )
+
+             # set dimensionality
+             dim = len(self._index_cols)
+             if self._stack:
+                 if self._multiheader:
+                     dim += len(self._header)
+                 else:
+                     dim += 1
+
+             sym = self._create_sym(None, ["*"] * dim)
+
+             # For symbols with None records, empty df is assigned
+             self._transform_sym_none_to_empty(sym)
+
+             if self._trace > 2:
+                 self._cdb.print_log(
+                     f"Connect Container symbol={self._name}:\n {sym.records}\n"
+                 )
+
+             if self._trace > 0:
+                 self._describe_container(self._cdb.container, "Connect Container (after):")
+
+             return

          if self._trace > 2:
              self._cdb.print_log(
@@ -569,11 +632,7 @@
          if self._auto_col is not None and not df.columns.empty:
              self._generate_column_labels(df)

-         if self._stack:
-             dim += 1
-             domain.append("*")
-
-         elif self._stack:
+         if self._stack:
              if self._multiheader:
                  dim += len(self._header)
                  domain.extend(
@@ -596,8 +655,12 @@
                  pd.__version__, "2.2"
              ): # pandas < 2.2.0
                  df = df.stack(dropna=False)
-             else: # pandas >= 2.2.0
+             elif self._pandas_version_before(
+                 pd.__version__, "3.0"
+             ): # 2.2.0 <= pandas < 3.0.0
                  df = df.stack(future_stack=True)
+             else: # pandas >= 3.0.0
+                 df = df.stack()

          if dim == 1 or (self._multiheader and dim == columns.nlevels):
              # drop pandas default index level
@@ -638,11 +701,6 @@
              self._substitute_values(df)
          df.dropna(inplace=True)

-         if self._sym_type == "par":
-             sym = gt.Parameter(self._cdb.container, self._name, domain=domain)
-         else:
-             sym = gt.Set(self._cdb.container, self._name, domain=domain)
-
          # reset the index to the default integer index
          df = df.reset_index(drop=True)

@@ -651,7 +709,7 @@
              "Final DataFrame that will be processed by" f" GAMSTransfer:\n{df}"
          )

-         sym.setRecords(df)
+         sym = self._create_sym(df, domain)

          if dim > 0 and self._stack:
              if self._multiheader:
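
The refactor replaces the direct gt.Parameter/gt.Set construction plus sym.setRecords(df) with a single _create_sym call that hands the records to the container at creation time, which also serves the new empty-CSV branch (records=None). A minimal sketch of the same addParameter/addSet pattern with gams.transfer; container, names, and data are illustrative:

import pandas as pd
import gams.transfer as gt

m = gt.Container()
df = pd.DataFrame({"i": ["seattle", "san-diego"], "value": [350.0, 600.0]})

def create_sym(container, name, sym_type, domain, records):
    # records=None yields a symbol without records, as in the EmptyDataError branch
    if sym_type == "par":
        return container.addParameter(name, domain, records=records)
    return container.addSet(name, domain, records=records)

a = create_sym(m, "a", "par", ["*"], df)    # one-dimensional parameter with records
e = create_sym(m, "e", "set", ["*"], None)  # empty set, records stay None
print(a.records)
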
gams/connect/agents/excelreader.py CHANGED
@@ -276,9 +276,11 @@ class ExcelReader(ExcelAgent):
          # pandas-version-check
          if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
              df.replace(value_sub, inplace=True)
-         else: # pandas >= 2.2.0
+         elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
              with pd.option_context("future.no_silent_downcasting", True):
                  df = df.replace(value_sub).infer_objects()
+         else: # pandas >= 3.0.0
+             df = df.replace(value_sub).infer_objects()
          return df

      def _write(self, df, sym_name, sym_type, rdim, cdim):
@@ -654,9 +656,11 @@ class ExcelReader(ExcelAgent):
          # pandas-version-check
          if self._pandas_version_before(pd.__version__, "2.2"): # pandas < 2.2.0
              df.replace(regex=pattern, value=GMS_SV_UNDEF, inplace=True)
-         else: # pandas >= 2.2.0
+         elif self._pandas_version_before(pd.__version__, "3.0"): # 2.2.0 <= pandas < 3.0.0
              with pd.option_context("future.no_silent_downcasting", True):
                  df = df.replace(regex=pattern, value=GMS_SV_UNDEF).infer_objects()
+         else: # pandas >= 3.0.0
+             df = df.replace(regex=pattern, value=GMS_SV_UNDEF).infer_objects()
          self._write(df, sym_name, sym_type, rdim, cdim)

      def _open(self):
gams/connect/agents/schema/ExcelWriter.yaml CHANGED
@@ -10,8 +10,11 @@ columnDimension:
      min: 0
      max: 20
    clearSheet:
-     type: boolean
      default: false
+     oneof:
+       - type: string
+         allowed: [range]
+       - type: boolean
    mergedCells:
      type: boolean
      default: false
@@ -65,9 +68,12 @@ symbols:
        - type: string
          allowed: [infer]
      clearSheet:
-       type: boolean
        default: null
        nullable: true
+       oneof:
+         - type: string
+           allowed: [range]
+         - type: boolean
      mergedCells:
        type: boolean
        default: null
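
With this schema change, clearSheet accepts the string value range alongside the booleans: true deletes and recreates the whole sheet, while range only clears the rectangle the symbol writes to (via Workbook.delete_range above). A hedged sketch of an ExcelWriter task expressed as a Python instruction dict, in the style the sqlitewrite tool below uses with cdb.execute; the file, symbol, and range values are illustrative:

cdb.execute(  # cdb: a gams.connect.ConnectDatabase instance (construction omitted)
    {
        "ExcelWriter": {
            "file": "output.xlsx",
            "clearSheet": "range",  # root-level default for all symbols
            "symbols": [
                {"name": "a", "range": "data!A1"},
                # per-symbol override: recreate this sheet entirely
                {"name": "b", "range": "other!A1", "clearSheet": True},
            ],
        }
    }
)
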
gams/connect/agents/sqlreader.py CHANGED
@@ -216,8 +216,12 @@ class SQLReader(ConnectAgent):
                  pd.__version__, "2.2"
              ): # pandas < 2.2.0
                  df = df.stack(dropna=False)
-             else: # pandas >= 2.2.0
+             elif self._pandas_version_before(
+                 pd.__version__, "3.0"
+             ): # 2.2.0 <= pandas < 3.0.0
                  df = df.stack(future_stack=True)
+             else: # pandas >= 3.0.0
+                 df = df.stack()
              if dim == 1:
                  df = df.droplevel(
                      level=0
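
future_stack=True was the pandas 2.2 transition flag for the reworked stack(); pandas 3.0 makes the new implementation (which keeps NaN rows) the default, so the bare df.stack() branch suffices there. A small sketch of the three branches on illustrative data:

import numpy as np
import pandas as pd

df = pd.DataFrame({"c1": [1.0, np.nan]}, index=["r1", "r2"])

# pandas < 2.2: keep NaN rows explicitly
# stacked = df.stack(dropna=False)
# pandas 2.2.x: same result via the transition flag
# stacked = df.stack(future_stack=True)
# pandas >= 3.0: the NaN-keeping behavior is the default
stacked = df.stack()
print(stacked)
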
gams/connect/agents/sqlwriter.py CHANGED
@@ -117,6 +117,7 @@ class SQLWriter(ConnectAgent):
              self._describe_container(self._cdb.container, "Connect Container:")

          self._handler.validate_insert_method(method=self._insertMethod)
+         self._handler.is_schema_available(schema=self._schema_name)
          self._open()

          try:
@@ -170,6 +171,7 @@ class SQLWriter(ConnectAgent):
          insertMethod = sym["insertMethod"]
          skip_text = sym["skipText"]
          self._handler.validate_insert_method(method=insertMethod)
+         self._handler.is_schema_available(schema=schema)

          if self._small and table_name == "UEL$":
              self._connect_error(
Binary files (the *.cp314-win_amd64.pyd modules, items 13-21 above): no textual diff.
gams/tools/toolcollection/data/sqlitewrite.py CHANGED
@@ -24,6 +24,17 @@
  #

  from gams.tools.toolcollection.tooltemplate import ToolTemplate
+ from gams.connect import ConnectDatabase
+ from gams import transfer as gt
+ from collections import defaultdict
+
+
+ def scalar_symbol_template():
+     """
+     Template for maintaining scalar symbols within a dictionary.
+     Intended to support potential future implementations of SpecialSymbols, subclasses of base symbols
+     """
+     return {"sym_name": [], "sym_recs": []}


  class Sqlitewrite(ToolTemplate):
@@ -95,6 +106,16 @@ class Sqlitewrite(ToolTemplate):

          return False

+     @staticmethod
+     def combine_scalars(m: gt.Container, scalar_dict, scalar_name, pd_concat):
+         df = pd_concat(scalar_dict["sym_recs"], ignore_index=True)
+         df.insert(0, "name", scalar_dict["sym_name"])
+         dom = ["name"]
+         if scalar_name in ["scalarvariables", "scalarequations"]:
+             dom.append("attribute")
+             df = df.melt(id_vars="name", var_name="attribute", value_name="value")
+         m.addParameter(name=scalar_name, domain=dom, records=df)
+
      def execute(self):
          if self.dohelp():
              return
@@ -137,9 +158,6 @@ class Sqlitewrite(ToolTemplate):
          unstack = self.check_bool_args("unstack")
          fast = self.check_bool_args("fast")

-         from gams.connect import ConnectDatabase
-         from gams import transfer as gt
-
          id_list = None  # reads all if ids is not set
          if "ids" in self.namedargs:
              id_list = self.namedargs_val("ids").split(",")
@@ -148,117 +166,92 @@ class Sqlitewrite(ToolTemplate):
          m: gt.Container = cdb.container
          self.read_id_inputs(m, inputs=id_list)

-         scalars = False
-         scalar_parameter, scalar_variable, scalar_equation, symbols = [], [], [], []
+         symbols = []
+         scalar_data = defaultdict(scalar_symbol_template)
          cc = m.data.copy()  # the following loop adds new symbols

-         for sym_name, data in cc.items():
-             if data.dimension == 0:
-                 scalars = True
-                 if isinstance(data, gt.Parameter):  # scalar parameter
-                     scalar_parameter.append(sym_name)
-                 elif isinstance(data, gt.Variable):  # scalar variable
-                     scalar_variable.append(sym_name)
-                 elif isinstance(data, gt.Equation):  # scalar equation
-                     scalar_equation.append(sym_name)
-             elif isinstance(data, gt.Variable) or isinstance(data, gt.Equation):
-                 dom = ",".join(f"d{i}" for i in range(data.dimension))
-                 cdb.execute(
-                     {
-                         "Projection": {
-                             "name": f"{sym_name}.all({dom})",
-                             "newName": f"{sym_name}_all({dom})",
-                         }
-                     }
+         for name, sym in cc.items():
+             if sym.dimension == 0:
+                 if isinstance(sym, gt.Parameter):
+                     sym_type = "Parameter"
+                 elif isinstance(sym, gt.Equation):
+                     sym_type = "Equation"
+                 elif isinstance(sym, gt.Variable):
+                     sym_type = "Variable"
+                 else:
+                     continue
+                 scalar_data[sym_type]["sym_name"].append(sym.name)
+                 scalar_data[sym_type]["sym_recs"].append(sym.records)
+             elif isinstance(sym, (gt.Variable, gt.Equation)):
+                 if sym.records is not None:
+                     dom = list(sym.records.columns[: sym.dimension])
+                     df = sym.records.melt(
+                         id_vars=dom, var_name="attribute", value_name="value"
+                     )
+                 else:
+                     ### The old tool creates a blank table for symbols with no records
+                     df = None
+                 m.addParameter(
+                     name=f"{name}_all",
+                     # NOTE: using sym.domain causes validity issues later on in SQLWriter
+                     domain=[dom if isinstance(dom, str) else dom.name for dom in sym.domain] + ["attribute"],
+                     records=df,
                  )
                  symbols.append(
                      {
-                         "name": f"{sym_name}_all",
-                         "tableName": sym_name if small else f"[{sym_name}]",
+                         "name": f"{name}_all",
+                         "tableName": name if small else f"[{name}]",
                          "unstack": True,
                      }
                  )

-             elif isinstance(data, gt.Alias):
+             elif isinstance(sym, gt.Alias):
                  pass
              else:
                  symbols.append(
                      {
-                         "name": sym_name,
-                         "tableName": sym_name if small else f"[{sym_name}]",
+                         "name": name,
+                         "tableName": name if small else f"[{name}]",
                      }
                  )
-
-         if scalars:
-             if scalar_parameter:
-                 cdb.execute(
-                     {"Projection": {"name": scalar_parameter, "newName": "scalars"}}
-                 )
-                 m["scalars"].records = m["scalars"].records.rename(
-                     columns={"uni_0": "name"}
-                 )
-                 symbols.append({"name": "scalars", "tableName": "scalars"})
-             if scalar_variable:
-                 cdb.execute(
-                     [
-                         # combine all scalar variables into one
-                         {
-                             "Projection": {
-                                 "name": scalar_variable,
-                                 "newName": "scalarvariables_dummy",
-                             }
-                         },
-                         # convert the combined variable to parameter with variable attributes
-                         {
-                             "Projection": {
-                                 "name": f"scalarvariables_dummy.all(i)",
-                                 "newName": f"scalarvariables(i)",
-                             }
-                         },
-                     ]
-                 )
-                 m["scalarvariables"].records = m["scalarvariables"].records.rename(
-                     columns={"uni_0": "name"}
-                 )
-                 symbols.append(
-                     {
-                         "name": "scalarvariables",
-                         "tableName": "scalarvariables",
-                         "unstack": True,
-                     }
-                 )
-                 m.removeSymbols(symbols="scalarvariables_dummy")
-             if scalar_equation:
-                 cdb.execute(
-                     [
-                         # combine all scalar equations into one
-                         {
-                             "Projection": {
-                                 "name": scalar_equation,
-                                 "newName": "scalarequations_dummy",
-                             }
-                         },
-                         # convert the combined equation to parameter with equation attributes
-                         {
-                             "Projection": {
-                                 "name": f"scalarequations_dummy.all(i)",
-                                 "newName": f"scalarequations(i)",
-                             }
-                         },
-                     ]
-                 )
-
-                 m["scalarequations"].records = m["scalarequations"].records.rename(
-                     columns={"uni_0": "name"}
-                 )
-                 symbols.append(
-                     {
-                         "name": "scalarequations",
-                         "tableName": "scalarequations",
-                         "unstack": True,
-                     }
-                 )
-                 m.removeSymbols(symbols="scalarequations_dummy")
+         if any(data["sym_name"] for data in scalar_data.values()):
+             from pandas import concat as pd_concat
+             if scalar_data["Parameter"]["sym_name"]:
+                 self.combine_scalars(
+                     m,
+                     scalar_dict=scalar_data["Parameter"],
+                     scalar_name="scalars",
+                     pd_concat=pd_concat,
+                 )
+                 symbols.append({"name": "scalars", "tableName": "scalars"})
+             if scalar_data["Variable"]["sym_name"]:
+                 self.combine_scalars(
+                     m,
+                     scalar_dict=scalar_data["Variable"],
+                     scalar_name="scalarvariables",
+                     pd_concat=pd_concat,
+                 )
+                 symbols.append(
+                     {
+                         "name": "scalarvariables",
+                         "tableName": "scalarvariables",
+                         "unstack": True,
+                     }
+                 )
+             if scalar_data["Equation"]["sym_name"]:
+                 self.combine_scalars(
+                     m,
+                     scalar_dict=scalar_data["Equation"],
+                     scalar_name="scalarequations",
+                     pd_concat=pd_concat,
+                 )
+                 symbols.append(
+                     {
+                         "name": "scalarequations",
+                         "tableName": "scalarequations",
+                         "unstack": True,
+                     }
+                 )
          sqlite_params = {
              "connection": {"database": sqlite_file},
              "connectionArguments": {"__globalCommit__": True},
gams/transfer/_internals/algorithms.py CHANGED
@@ -212,7 +212,7 @@ def convert_to_categoricals_str(
          dfs.append(pd.DataFrame(arrvals))

      if has_domains and has_values:
-         df = pd.concat(dfs, axis=1, copy=False)
+         df = pd.concat(dfs, axis=1)
          df.columns = pd.RangeIndex(start=0, stop=len(df.columns))
      elif has_domains or has_values:
          df = dfs[0]
@@ -245,7 +245,7 @@ def convert_to_categoricals_cat(
          dfs.append(pd.DataFrame(arrvals))

      if has_domains and has_values:
-         df = pd.concat(dfs, axis=1, copy=False)
+         df = pd.concat(dfs, axis=1)
          df.columns = pd.RangeIndex(start=0, stop=len(df.columns))
      elif has_domains or has_values:
          df = dfs[0]
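
The copy keyword of pd.concat is deprecated under pandas' Copy-on-Write model, so dropping it keeps these calls compatible with pandas 3.0 while behaving the same. A trivial sketch of the surviving call shape (data is illustrative):

import pandas as pd

left = pd.DataFrame({"i": ["a", "b"]})
right = pd.DataFrame({"value": [1.0, 2.0]})

# pandas >= 3.0 manages copies via Copy-on-Write; the old copy=False hint is unnecessary
df = pd.concat([left, right], axis=1)
df.columns = pd.RangeIndex(start=0, stop=len(df.columns))
print(df)
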