MindsDB 25.7.3.0__py3-none-any.whl → 25.7.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MindsDB might be problematic.

Files changed (61)
  1. mindsdb/__about__.py +1 -1
  2. mindsdb/api/a2a/common/server/server.py +16 -6
  3. mindsdb/api/executor/command_executor.py +206 -135
  4. mindsdb/api/executor/datahub/datanodes/project_datanode.py +14 -3
  5. mindsdb/api/executor/planner/plan_join.py +3 -0
  6. mindsdb/api/executor/planner/plan_join_ts.py +117 -100
  7. mindsdb/api/executor/planner/query_planner.py +1 -0
  8. mindsdb/api/executor/sql_query/steps/apply_predictor_step.py +54 -85
  9. mindsdb/api/http/initialize.py +16 -43
  10. mindsdb/api/http/namespaces/agents.py +23 -20
  11. mindsdb/api/http/namespaces/chatbots.py +83 -120
  12. mindsdb/api/http/namespaces/file.py +1 -1
  13. mindsdb/api/http/namespaces/jobs.py +38 -60
  14. mindsdb/api/http/namespaces/tree.py +69 -61
  15. mindsdb/api/mcp/start.py +2 -0
  16. mindsdb/api/mysql/mysql_proxy/utilities/dump.py +3 -2
  17. mindsdb/integrations/handlers/autogluon_handler/requirements.txt +1 -1
  18. mindsdb/integrations/handlers/autosklearn_handler/requirements.txt +1 -1
  19. mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py +25 -5
  20. mindsdb/integrations/handlers/chromadb_handler/chromadb_handler.py +3 -3
  21. mindsdb/integrations/handlers/flaml_handler/requirements.txt +1 -1
  22. mindsdb/integrations/handlers/google_calendar_handler/google_calendar_tables.py +82 -73
  23. mindsdb/integrations/handlers/hubspot_handler/requirements.txt +1 -1
  24. mindsdb/integrations/handlers/langchain_handler/langchain_handler.py +83 -76
  25. mindsdb/integrations/handlers/lightwood_handler/requirements.txt +4 -4
  26. mindsdb/integrations/handlers/litellm_handler/litellm_handler.py +5 -2
  27. mindsdb/integrations/handlers/litellm_handler/settings.py +2 -1
  28. mindsdb/integrations/handlers/pgvector_handler/pgvector_handler.py +106 -90
  29. mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +41 -39
  30. mindsdb/integrations/handlers/salesforce_handler/constants.py +208 -0
  31. mindsdb/integrations/handlers/salesforce_handler/salesforce_handler.py +141 -80
  32. mindsdb/integrations/handlers/salesforce_handler/salesforce_tables.py +0 -1
  33. mindsdb/integrations/handlers/tpot_handler/requirements.txt +1 -1
  34. mindsdb/integrations/handlers/web_handler/urlcrawl_helpers.py +32 -17
  35. mindsdb/integrations/handlers/web_handler/web_handler.py +19 -22
  36. mindsdb/integrations/libs/vectordatabase_handler.py +10 -1
  37. mindsdb/integrations/utilities/handler_utils.py +32 -12
  38. mindsdb/interfaces/agents/agents_controller.py +167 -108
  39. mindsdb/interfaces/agents/langchain_agent.py +10 -3
  40. mindsdb/interfaces/data_catalog/data_catalog_loader.py +4 -4
  41. mindsdb/interfaces/database/database.py +38 -13
  42. mindsdb/interfaces/database/integrations.py +20 -5
  43. mindsdb/interfaces/database/projects.py +63 -16
  44. mindsdb/interfaces/database/views.py +86 -60
  45. mindsdb/interfaces/jobs/jobs_controller.py +103 -110
  46. mindsdb/interfaces/knowledge_base/controller.py +26 -5
  47. mindsdb/interfaces/knowledge_base/evaluate.py +2 -1
  48. mindsdb/interfaces/knowledge_base/executor.py +24 -0
  49. mindsdb/interfaces/query_context/context_controller.py +100 -133
  50. mindsdb/interfaces/skills/skills_controller.py +18 -6
  51. mindsdb/interfaces/storage/db.py +40 -6
  52. mindsdb/interfaces/variables/variables_controller.py +8 -15
  53. mindsdb/utilities/config.py +3 -3
  54. mindsdb/utilities/functions.py +72 -60
  55. mindsdb/utilities/log.py +38 -6
  56. mindsdb/utilities/ps.py +7 -7
  57. {mindsdb-25.7.3.0.dist-info → mindsdb-25.7.4.0.dist-info}/METADATA +246 -247
  58. {mindsdb-25.7.3.0.dist-info → mindsdb-25.7.4.0.dist-info}/RECORD +61 -60
  59. {mindsdb-25.7.3.0.dist-info → mindsdb-25.7.4.0.dist-info}/WHEEL +0 -0
  60. {mindsdb-25.7.3.0.dist-info → mindsdb-25.7.4.0.dist-info}/licenses/LICENSE +0 -0
  61. {mindsdb-25.7.3.0.dist-info → mindsdb-25.7.4.0.dist-info}/top_level.txt +0 -0
mindsdb/api/executor/sql_query/steps/apply_predictor_step.py

@@ -26,38 +26,28 @@ from .base import BaseStepCall
 
 
 def get_preditor_alias(step, mindsdb_database):
-    predictor_name = '.'.join(step.predictor.parts)
-    predictor_alias = '.'.join(step.predictor.alias.parts) if step.predictor.alias is not None else predictor_name
+    predictor_name = ".".join(step.predictor.parts)
+    predictor_alias = ".".join(step.predictor.alias.parts) if step.predictor.alias is not None else predictor_name
     return (mindsdb_database, predictor_name, predictor_alias)
 
 
 class ApplyPredictorBaseCall(BaseStepCall):
-
     def apply_predictor(self, project_name, predictor_name, df, version, params):
         # is it an agent?
         agent = self.session.agents_controller.get_agent(predictor_name, project_name)
         if agent is not None:
-
-            messages = df.to_dict('records')
+            messages = df.to_dict("records")
             predictions = self.session.agents_controller.get_completion(
-                agent,
-                messages=messages,
-                project_name=project_name,
+                agent, messages=messages, project_name=project_name, params=params
             )
 
         else:
             project_datanode = self.session.datahub.get(project_name)
-            predictions = project_datanode.predict(
-                model_name=predictor_name,
-                df=df,
-                version=version,
-                params=params
-            )
+            predictions = project_datanode.predict(model_name=predictor_name, df=df, version=version, params=params)
         return predictions
 
 
 class ApplyPredictorRowStepCall(ApplyPredictorBaseCall):
-
     bind = ApplyPredictorRowStep
 
     def call(self, step):
@@ -89,7 +79,7 @@ class ApplyPredictorRowStepCall(ApplyPredictorBaseCall):
         for k, v in where_data.items():
             predictions[k] = v
 
-        table_name = get_preditor_alias(step, self.context.get('database'))
+        table_name = get_preditor_alias(step, self.context.get("database"))
 
         if len(predictions) == 0:
             columns_names = project_datanode.get_table_columns_names(predictor_name)
@@ -100,12 +90,11 @@ class ApplyPredictorRowStepCall(ApplyPredictorBaseCall):
             database=table_name[0],
             table_name=table_name[1],
             table_alias=table_name[2],
-            is_prediction=True
+            is_prediction=True,
         )
 
 
 class ApplyPredictorStepCall(ApplyPredictorBaseCall):
-
     bind = ApplyPredictorStep
 
     def call(self, step):
@@ -115,20 +104,20 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
         params = step.params or {}
 
         # adding __mindsdb_row_id, use first table if exists
-        if len(data.find_columns('__mindsdb_row_id')) == 0:
+        if len(data.find_columns("__mindsdb_row_id")) == 0:
             table = data.get_tables()[0] if len(data.get_tables()) > 0 else None
 
             row_id_col = Column(
-                name='__mindsdb_row_id',
-                database=table['database'] if table is not None else None,
-                table_name=table['table_name'] if table is not None else None,
-                table_alias=table['table_alias'] if table is not None else None
+                name="__mindsdb_row_id",
+                database=table["database"] if table is not None else None,
+                table_name=table["table_name"] if table is not None else None,
+                table_alias=table["table_alias"] if table is not None else None,
             )
 
-            row_id = self.context.get('row_id')
+            row_id = self.context.get("row_id")
             values = range(row_id, row_id + data.length())
             data.add_column(row_id_col, values)
-            self.context['row_id'] += data.length()
+            self.context["row_id"] += data.length()
 
         project_name = step.namespace
         predictor_name = step.predictor.parts[0]
@@ -143,47 +132,46 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
             data.set_column_values(k, v)
 
         predictor_metadata = {}
-        for pm in self.context['predictor_metadata']:
-            if pm['name'] == predictor_name and pm['integration_name'].lower() == project_name:
+        for pm in self.context["predictor_metadata"]:
+            if pm["name"] == predictor_name and pm["integration_name"].lower() == project_name:
                 predictor_metadata = pm
                 break
-        is_timeseries = predictor_metadata['timeseries']
+        is_timeseries = predictor_metadata["timeseries"]
         _mdb_forecast_offset = None
         if is_timeseries:
-            if '> LATEST' in self.context['query_str']:
+            if "> LATEST" in self.context["query_str"]:
                 # stream mode -- if > LATEST, forecast starts on inferred next timestamp
                 _mdb_forecast_offset = 1
-            elif '= LATEST' in self.context['query_str']:
+            elif "= LATEST" in self.context["query_str"]:
                 # override: when = LATEST, forecast starts on last provided timestamp instead of inferred next time
                 _mdb_forecast_offset = 0
             else:
                 # normal mode -- emit a forecast ($HORIZON data points on each) for each provided timestamp
-                params['force_ts_infer'] = True
+                params["force_ts_infer"] = True
                 _mdb_forecast_offset = None
 
-            data.add_column(Column(name='__mdb_forecast_offset'), _mdb_forecast_offset)
+            data.add_column(Column(name="__mdb_forecast_offset"), _mdb_forecast_offset)
 
-        table_name = get_preditor_alias(step, self.context['database'])
+        table_name = get_preditor_alias(step, self.context["database"])
 
         project_datanode = self.session.datahub.get(project_name)
         if len(data) == 0:
-            columns_names = project_datanode.get_table_columns_names(predictor_name) + ['__mindsdb_row_id']
+            columns_names = project_datanode.get_table_columns_names(predictor_name) + ["__mindsdb_row_id"]
             result = ResultSet(is_prediction=True)
             for column_name in columns_names:
-                result.add_column(Column(
-                    name=column_name,
-                    database=table_name[0],
-                    table_name=table_name[1],
-                    table_alias=table_name[2]
-                ))
+                result.add_column(
+                    Column(
+                        name=column_name, database=table_name[0], table_name=table_name[1], table_alias=table_name[2]
+                    )
                )
         else:
-            predictor_id = predictor_metadata['id']
+            predictor_id = predictor_metadata["id"]
             table_df = data.to_df()
 
             if self.session.predictor_cache is not False:
-                key = f'{predictor_name}_{predictor_id}_{dataframe_checksum(table_df)}'
+                key = f"{predictor_name}_{predictor_id}_{dataframe_checksum(table_df)}"
 
-                predictor_cache = get_cache('predict')
+                predictor_cache = get_cache("predict")
                 predictions = predictor_cache.get(key)
             else:
                 predictions = None
@@ -221,7 +209,7 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
 
         # apply filter
         if is_timeseries:
-            pred_data = predictions.to_dict(orient='records')
+            pred_data = predictions.to_dict(orient="records")
             where_data = list(data.get_records())
             pred_data = self.apply_ts_filter(pred_data, where_data, step, predictor_metadata)
             predictions = pd.DataFrame(pred_data)
@@ -231,37 +219,33 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
             database=table_name[0],
             table_name=table_name[1],
             table_alias=table_name[2],
-            is_prediction=True
+            is_prediction=True,
         )
 
         return result
 
     def apply_ts_filter(self, predictor_data, table_data, step, predictor_metadata):
-
         if step.output_time_filter is None:
             # no filter, exit
             return predictor_data
 
         # apply filter
-        group_cols = predictor_metadata['group_by_columns']
-        order_col = predictor_metadata['order_by_column']
+        group_cols = predictor_metadata["group_by_columns"]
+        order_col = predictor_metadata["order_by_column"]
 
         filter_args = step.output_time_filter.args
         filter_op = step.output_time_filter.op
 
         # filter field must be order column
-        if not (
-            isinstance(filter_args[0], Identifier)
-            and filter_args[0].parts[-1] == order_col
-        ):
+        if not (isinstance(filter_args[0], Identifier) and filter_args[0].parts[-1] == order_col):
             # exit otherwise
             return predictor_data
 
         def get_date_format(samples):
             # Try common formats first with explicit patterns
             for date_format, pattern in (
-                ('%Y-%m-%d', r'[\d]{4}-[\d]{2}-[\d]{2}'),
-                ('%Y-%m-%d %H:%M:%S', r'[\d]{4}-[\d]{2}-[\d]{2} [\d]{2}:[\d]{2}:[\d]{2}'),
+                ("%Y-%m-%d", r"[\d]{4}-[\d]{2}-[\d]{2}"),
+                ("%Y-%m-%d %H:%M:%S", r"[\d]{4}-[\d]{2}-[\d]{2} [\d]{2}:[\d]{2}:[\d]{2}"),
                 # ('%Y-%m-%d %H:%M:%S%z', r'[\d]{4}-[\d]{2}-[\d]{2} [\d]{2}:[\d]{2}:[\d]{2}\+[\d]{2}:[\d]{2}'),
                 # ('%Y', '[\d]{4}')
             ):
@@ -281,6 +265,7 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
             # Parse the first sample to get its format
             # The import is heavy, so we do it here on-demand
             import dateparser
+
             parsed_date = dateparser.parse(samples[0])
             if parsed_date is None:
                 raise ValueError("Could not parse date")
@@ -290,25 +275,21 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
                 if dateparser.parse(sample) is None:
                     raise ValueError("Inconsistent date formats in samples")
             # Convert to strftime format based on the input
-            if re.search(r'\d{2}:\d{2}:\d{2}', samples[0]):
-                return '%Y-%m-%d %H:%M:%S'
-            return '%Y-%m-%d'
+            if re.search(r"\d{2}:\d{2}:\d{2}", samples[0]):
+                return "%Y-%m-%d %H:%M:%S"
+            return "%Y-%m-%d"
         except (ValueError, AttributeError):
             # If dateparser fails, return a basic format as last resort
-            return '%Y-%m-%d'
+            return "%Y-%m-%d"
 
-        model_types = predictor_metadata['model_types']
-        if model_types.get(order_col) in ('float', 'integer'):
+        model_types = predictor_metadata["model_types"]
+        if model_types.get(order_col) in ("float", "integer"):
             # convert strings to digits
-            fnc = {
-                'integer': int,
-                'float': float
-            }[model_types[order_col]]
+            fnc = {"integer": int, "float": float}[model_types[order_col]]
 
             # convert predictor_data
             if len(predictor_data) > 0:
                 if isinstance(predictor_data[0][order_col], str):
-
                     for row in predictor_data:
                         row[order_col] = fnc(row[order_col])
                 elif isinstance(predictor_data[0][order_col], dt.date):
@@ -318,7 +299,6 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
 
             # convert predictor_data
             if isinstance(table_data[0][order_col], str):
-
                 for row in table_data:
                     row[order_col] = fnc(row[order_col])
             elif isinstance(table_data[0][order_col], dt.date):
@@ -327,18 +307,13 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
                     row[order_col] = fnc(row[order_col])
 
             # convert args to date
-            samples = [
-                arg.value
-                for arg in filter_args
-                if isinstance(arg, Constant) and isinstance(arg.value, str)
-            ]
+            samples = [arg.value for arg in filter_args if isinstance(arg, Constant) and isinstance(arg.value, str)]
             if len(samples) > 0:
-
                 for arg in filter_args:
                     if isinstance(arg, Constant) and isinstance(arg.value, str):
                         arg.value = fnc(arg.value)
 
-        if model_types.get(order_col) in ('date', 'datetime') or isinstance(predictor_data[0][order_col], pd.Timestamp):  # noqa
+        if model_types.get(order_col) in ("date", "datetime") or isinstance(predictor_data[0][order_col], pd.Timestamp):  # noqa
             # convert strings to date
             # it is making side effect on original data by changing it but let it be
 
@@ -364,11 +339,7 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
             _cast_samples(table_data, order_col)
 
             # convert args to date
-            samples = [
-                arg.value
-                for arg in filter_args
-                if isinstance(arg, Constant) and isinstance(arg.value, str)
-            ]
+            samples = [arg.value for arg in filter_args if isinstance(arg, Constant) and isinstance(arg.value, str)]
             if len(samples) > 0:
                 date_format = get_date_format(samples)
@@ -380,7 +351,6 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
         # first pass: get max values for Latest in table data
         latest_vals = {}
         if Latest() in filter_args:
-
             for row in table_data:
                 if group_cols is None:
                     key = 0  # the same for any value
@@ -400,11 +370,11 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
                     data2.append(row)
         elif isinstance(step.output_time_filter, BinaryOperation):
             op_map = {
-                '<': '__lt__',
-                '<=': '__le__',
-                '>': '__gt__',
-                '>=': '__ge__',
-                '=': '__eq__',
+                "<": "__lt__",
+                "<=": "__le__",
+                ">": "__gt__",
+                ">=": "__ge__",
+                "=": "__eq__",
             }
             arg = filter_args[1]
             if isinstance(arg, Latest):
@@ -435,5 +405,4 @@ class ApplyPredictorStepCall(ApplyPredictorBaseCall):
 
 
 class ApplyTimeseriesPredictorStepCall(ApplyPredictorStepCall):
-
     bind = ApplyTimeseriesPredictorStep
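
Most of the churn in apply_predictor_step.py is quote-style normalization from an autoformatter; the behavioral surface worth understanding is the timeseries branch, where the executor picks a forecast offset from how the query compares against LATEST. A minimal sketch of that decision, distilled from the diff (the standalone forecast_offset helper is illustrative, not a MindsDB API; the real code runs this inline and attaches the result as a __mdb_forecast_offset column):

# Sketch of the offset selection in ApplyPredictorStepCall.call (illustrative helper).
def forecast_offset(query_str: str, params: dict):
    if "> LATEST" in query_str:
        return 1  # stream mode: forecast starts at the inferred next timestamp
    if "= LATEST" in query_str:
        return 0  # forecast starts at the last provided timestamp
    params["force_ts_infer"] = True  # normal mode: one forecast per provided timestamp
    return None

params = {}
assert forecast_offset("SELECT * FROM m WHERE t > LATEST", params) == 1
assert forecast_offset("SELECT * FROM m WHERE t = LATEST", params) == 0
assert forecast_offset("SELECT * FROM m", params) is None and params["force_ts_infer"] is True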
mindsdb/api/http/initialize.py

@@ -43,6 +43,7 @@ from mindsdb.api.http.namespaces.webhooks import ns_conf as webhooks_ns
 from mindsdb.interfaces.database.integrations import integration_controller
 from mindsdb.interfaces.database.database import DatabaseController
 from mindsdb.interfaces.file.file_controller import FileController
+from mindsdb.interfaces.jobs.jobs_controller import JobsController
 from mindsdb.interfaces.storage import db
 from mindsdb.metrics.server import init_metrics
 from mindsdb.utilities import log
@@ -109,9 +110,7 @@ def get_last_compatible_gui_version() -> Version:
         return False
 
     if res.status_code != 200:
-        logger.error(
-            f"Cant get compatible-config.json: returned status code = {res.status_code}"
-        )
+        logger.error(f"Cant get compatible-config.json: returned status code = {res.status_code}")
         return False
 
     try:
@@ -132,10 +131,7 @@ def get_last_compatible_gui_version() -> Version:
         else:
             mindsdb_lv = parse_version(el["mindsdb_version"])
             gui_lv = parse_version(el["gui_version"])
-            if (
-                mindsdb_lv.base_version not in gui_versions
-                or gui_lv > gui_versions[mindsdb_lv.base_version]
-            ):
+            if mindsdb_lv.base_version not in gui_versions or gui_lv > gui_versions[mindsdb_lv.base_version]:
                 gui_versions[mindsdb_lv.base_version] = gui_lv
             if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
                 max_mindsdb_lv = mindsdb_lv
@@ -151,9 +147,7 @@ def get_last_compatible_gui_version() -> Version:
         gui_version_lv = max_gui_lv
     else:
         lower_versions = {
-            key: value
-            for key, value in gui_versions.items()
-            if parse_version(key) < current_mindsdb_lv
+            key: value for key, value in gui_versions.items() if parse_version(key) < current_mindsdb_lv
         }
         if len(lower_versions) == 0:
             gui_version_lv = gui_versions[all_mindsdb_lv[0].base_version]
@@ -169,7 +163,7 @@ def get_last_compatible_gui_version() -> Version:
 
 
 def get_current_gui_version() -> Version:
-    logger.debug("Getting current frontend version..")
+    logger.debug("Getting current frontend version...")
     config = Config()
     static_path = Path(config["paths"]["static"])
     version_txt_path = static_path.joinpath("version.txt")
@@ -179,9 +173,7 @@ def get_current_gui_version() -> Version:
     with open(version_txt_path, "rt") as f:
         current_gui_version = f.readline()
 
-    current_gui_lv = (
-        None if current_gui_version is None else parse_version(current_gui_version)
-    )
+    current_gui_lv = None if current_gui_version is None else parse_version(current_gui_version)
     logger.debug(f"Current frontend version: {current_gui_lv}.")
 
     return current_gui_lv
@@ -197,10 +189,7 @@ def initialize_static():
     if required_gui_version is not None:
         required_gui_version_lv = parse_version(required_gui_version)
         success = True
-        if (
-            current_gui_version_lv is None
-            or required_gui_version_lv != current_gui_version_lv
-        ):
+        if current_gui_version_lv is None or required_gui_version_lv != current_gui_version_lv:
             logger.debug("Updating gui..")
             success = update_static(required_gui_version_lv)
     else:
@@ -208,10 +197,7 @@ def initialize_static():
         return False
 
     # ignore versions like '23.9.2.2'
-    if (
-        current_gui_version_lv is not None
-        and len(current_gui_version_lv.release) < 3
-    ):
+    if current_gui_version_lv is not None and len(current_gui_version_lv.release) < 3:
         if current_gui_version_lv == last_gui_version_lv:
             return True
         logger.debug("Updating gui..")
@@ -227,12 +213,8 @@ def initialize_app(config, no_studio):
     gui_exists = Path(static_root).joinpath("index.html").is_file()
     logger.debug(f"Does GUI already exist.. {'YES' if gui_exists else 'NO'}")
    init_static_thread = None
-    if no_studio is False and (
-        config["gui"]["autoupdate"] is True or gui_exists is False
-    ):
-        init_static_thread = threading.Thread(
-            target=initialize_static, name="initialize_static"
-        )
+    if no_studio is False and (config["gui"]["autoupdate"] is True or gui_exists is False):
+        init_static_thread = threading.Thread(target=initialize_static, name="initialize_static")
         init_static_thread.start()
 
     # Wait for static initialization.
@@ -334,9 +316,7 @@ def initialize_app(config, no_studio):
         # region routes where auth is required
         if (
             config["auth"]["http_auth_enabled"] is True
-            and any(
-                request.path.startswith(f"/api{ns.path}") for ns in protected_namespaces
-            )
+            and any(request.path.startswith(f"/api{ns.path}") for ns in protected_namespaces)
             and check_auth() is False
         ):
             return http_error(
@@ -368,18 +348,14 @@ def initialize_app(config, no_studio):
         try:
             company_id = int(company_id)
         except Exception as e:
-            logger.error(
-                f"Cloud not parse company id: {company_id} | exception: {e}"
-            )
+            logger.error(f"Cloud not parse company id: {company_id} | exception: {e}")
             company_id = None
 
         if user_class is not None:
             try:
                 user_class = int(user_class)
             except Exception as e:
-                logger.error(
-                    f"Cloud not parse user_class: {user_class} | exception: {e}"
-                )
+                logger.error(f"Cloud not parse user_class: {user_class} | exception: {e}")
                 user_class = 0
         else:
             user_class = 0
@@ -419,9 +395,7 @@ def initialize_flask(config, init_static_thread, no_studio):
 
     app.config["SECRET_KEY"] = os.environ.get("FLASK_SECRET_KEY", secrets.token_hex(32))
     app.config["SESSION_COOKIE_NAME"] = "session"
-    app.config["PERMANENT_SESSION_LIFETIME"] = config["auth"][
-        "http_permanent_session_lifetime"
-    ]
+    app.config["PERMANENT_SESSION_LIFETIME"] = config["auth"]["http_permanent_session_lifetime"]
     app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 60
     app.config["SWAGGER_HOST"] = "http://localhost:8000/mindsdb"
     app.json = CustomJSONProvider()
@@ -467,6 +441,7 @@ def initialize_interfaces(app):
     app.integration_controller = integration_controller
     app.database_controller = DatabaseController()
     app.file_controller = FileController()
+    app.jobs_controller = JobsController()
     config = Config()
     app.config_obj = config
 
@@ -479,9 +454,7 @@ def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_f
     if init_static_thread is not None:
         init_static_thread.join()
     try:
-        is_http_active = wait_func_is_true(
-            func=is_pid_listen_port, timeout=15, pid=pid, port=port
-        )
+        is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=15, pid=pid, port=port)
         if is_http_active:
             webbrowser.open(url)
     except Exception as e:
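
Beyond the formatting cleanup, the notable change in initialize.py is the new JobsController: it is imported at the top of the module and an instance is attached to the Flask app in initialize_interfaces, alongside the existing controllers. Attributes set on the app object this way are reachable from any request handler via flask.current_app; a minimal standalone sketch of the pattern (DummyJobsController and its get_list method are stand-ins, not MindsDB's class):

from flask import Flask, current_app

class DummyJobsController:
    # stand-in for mindsdb.interfaces.jobs.jobs_controller.JobsController
    def get_list(self):
        return []

app = Flask(__name__)
app.jobs_controller = DummyJobsController()  # same pattern as initialize_interfaces(app)

@app.route("/jobs")
def list_jobs():
    # handlers reach the shared controller through the current_app proxy
    return {"jobs": current_app.jobs_controller.get_list()}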
mindsdb/api/http/namespaces/agents.py

@@ -27,14 +27,21 @@ def create_agent(project_name, name, agent):
     if name is None:
         return http_error(HTTPStatus.BAD_REQUEST, "Missing field", 'Missing "name" field for agent')
 
-    if "model_name" not in agent:
-        return http_error(HTTPStatus.BAD_REQUEST, "Missing field", 'Missing "model_name" field for agent')
+    if not name.islower():
+        return http_error(HTTPStatus.BAD_REQUEST, "Wrong name", f"The name must be in lower case: {name}")
 
-    model_name = agent["model_name"]
+    model_name = agent.get("model_name")
     provider = agent.get("provider")
-    params = agent.get("params", {})
     skills = agent.get("skills", [])
 
+    params = agent.get("params", {})
+    if agent.get("data"):
+        params["data"] = agent["data"]
+    if agent.get("model"):
+        params["model"] = agent["model"]
+    if agent.get("prompt_template"):
+        params["prompt_template"] = agent["prompt_template"]
+
     agents_controller = AgentsController()
 
     try:
@@ -153,12 +160,6 @@ class AgentResource(Resource):
 
         agent = request.json["agent"]
         name = agent.get("name", None)
-        model_name = agent.get("model_name", None)
-        skills_to_add = agent.get("skills_to_add", [])
-        skills_to_remove = agent.get("skills_to_remove", [])
-        skills_to_rewrite = agent.get("skills", [])
-        provider = agent.get("provider")
-        params = agent.get("params", None)
 
         # Agent must not exist with new name.
         if name is not None and name != agent_name:
@@ -176,9 +177,18 @@ class AgentResource(Resource):
 
         # Update
         try:
-            # Prepare the params dictionary
-            if params is None:
-                params = {}
+            model_name = agent.get("model_name", None)
+            skills_to_add = agent.get("skills_to_add", [])
+            skills_to_remove = agent.get("skills_to_remove", [])
+            skills_to_rewrite = agent.get("skills", [])
+            provider = agent.get("provider")
+            params = agent.get("params", {})
+            if agent.get("data"):
+                params["data"] = agent["data"]
+            if agent.get("model"):
+                params["model"] = agent["model"]
+            if agent.get("prompt_template"):
+                params["prompt_template"] = agent["prompt_template"]
 
             # Check if any of the skills to be added is of type 'retrieval'
             session = SessionController()
@@ -377,13 +387,6 @@ class AgentCompletions(Resource):
                 HTTPStatus.NOT_FOUND, "Project not found", f"Project with name {project_name} does not exist"
             )
 
-        # Add OpenAI API key to agent params if not already present.
-        if not existing_agent.params:
-            existing_agent.params = {}
-        existing_agent.params["openai_api_key"] = existing_agent.params.get(
-            "openai_api_key", os.getenv("OPENAI_API_KEY")
-        )
-
         # set mode to `retrieval` if agent has a skill of type `retrieval` and mode is not set
         if "mode" not in existing_agent.params and any(
             rel.skill.type == "retrieval" for rel in existing_agent.skills_relationships
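
Taken together, the agents.py changes relax the create contract (model_name is now optional, but names must be lowercase) and fold the top-level data, model, and prompt_template request fields into params on both create and update. A sketch of that folding, extracted into a helper for illustration (MindsDB inlines this block twice; the field values below are made up):

def fold_agent_params(agent: dict) -> dict:
    # mirrors the repeated block in create_agent and AgentResource.put
    params = agent.get("params", {})
    for key in ("data", "model", "prompt_template"):
        if agent.get(key):
            params[key] = agent[key]
    return params

agent = {"model": "my-model", "prompt_template": "Answer briefly.", "params": {"mode": "retrieval"}}
assert fold_agent_params(agent) == {
    "mode": "retrieval",
    "model": "my-model",
    "prompt_template": "Answer briefly.",
}

The AgentCompletions endpoint also stops injecting OPENAI_API_KEY from the environment into agent params; credentials presumably now flow through the agent's own configuration (handler_utils.py, which also changed in this release, is a plausible new home for that fallback).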