MindsDB 25.7.2.0-py3-none-any.whl → 25.7.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MindsDB might be problematic.

Files changed (69)
  1. mindsdb/__about__.py +1 -1
  2. mindsdb/__main__.py +1 -1
  3. mindsdb/api/a2a/common/server/server.py +16 -6
  4. mindsdb/api/executor/command_executor.py +213 -137
  5. mindsdb/api/executor/datahub/datanodes/integration_datanode.py +5 -1
  6. mindsdb/api/executor/datahub/datanodes/project_datanode.py +14 -3
  7. mindsdb/api/executor/planner/plan_join.py +3 -0
  8. mindsdb/api/executor/planner/plan_join_ts.py +117 -100
  9. mindsdb/api/executor/planner/query_planner.py +1 -0
  10. mindsdb/api/executor/sql_query/steps/apply_predictor_step.py +54 -85
  11. mindsdb/api/http/initialize.py +16 -43
  12. mindsdb/api/http/namespaces/agents.py +24 -21
  13. mindsdb/api/http/namespaces/chatbots.py +83 -120
  14. mindsdb/api/http/namespaces/file.py +1 -1
  15. mindsdb/api/http/namespaces/jobs.py +38 -60
  16. mindsdb/api/http/namespaces/tree.py +69 -61
  17. mindsdb/api/mcp/start.py +2 -0
  18. mindsdb/api/mysql/mysql_proxy/utilities/dump.py +3 -2
  19. mindsdb/integrations/handlers/autogluon_handler/requirements.txt +1 -1
  20. mindsdb/integrations/handlers/autosklearn_handler/requirements.txt +1 -1
  21. mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py +25 -5
  22. mindsdb/integrations/handlers/chromadb_handler/chromadb_handler.py +3 -3
  23. mindsdb/integrations/handlers/flaml_handler/requirements.txt +1 -1
  24. mindsdb/integrations/handlers/google_calendar_handler/google_calendar_tables.py +82 -73
  25. mindsdb/integrations/handlers/hubspot_handler/requirements.txt +1 -1
  26. mindsdb/integrations/handlers/langchain_handler/langchain_handler.py +83 -76
  27. mindsdb/integrations/handlers/lightwood_handler/requirements.txt +4 -4
  28. mindsdb/integrations/handlers/litellm_handler/litellm_handler.py +16 -3
  29. mindsdb/integrations/handlers/litellm_handler/settings.py +2 -1
  30. mindsdb/integrations/handlers/llama_index_handler/requirements.txt +1 -1
  31. mindsdb/integrations/handlers/pgvector_handler/pgvector_handler.py +106 -90
  32. mindsdb/integrations/handlers/postgres_handler/postgres_handler.py +41 -39
  33. mindsdb/integrations/handlers/s3_handler/s3_handler.py +72 -70
  34. mindsdb/integrations/handlers/salesforce_handler/constants.py +208 -0
  35. mindsdb/integrations/handlers/salesforce_handler/salesforce_handler.py +142 -81
  36. mindsdb/integrations/handlers/salesforce_handler/salesforce_tables.py +12 -4
  37. mindsdb/integrations/handlers/slack_handler/slack_tables.py +141 -161
  38. mindsdb/integrations/handlers/tpot_handler/requirements.txt +1 -1
  39. mindsdb/integrations/handlers/web_handler/urlcrawl_helpers.py +32 -17
  40. mindsdb/integrations/handlers/web_handler/web_handler.py +19 -22
  41. mindsdb/integrations/handlers/youtube_handler/youtube_tables.py +183 -55
  42. mindsdb/integrations/libs/vectordatabase_handler.py +10 -1
  43. mindsdb/integrations/utilities/handler_utils.py +32 -12
  44. mindsdb/interfaces/agents/agents_controller.py +169 -110
  45. mindsdb/interfaces/agents/langchain_agent.py +10 -3
  46. mindsdb/interfaces/data_catalog/data_catalog_loader.py +22 -8
  47. mindsdb/interfaces/database/database.py +38 -13
  48. mindsdb/interfaces/database/integrations.py +20 -5
  49. mindsdb/interfaces/database/projects.py +63 -16
  50. mindsdb/interfaces/database/views.py +86 -60
  51. mindsdb/interfaces/jobs/jobs_controller.py +103 -110
  52. mindsdb/interfaces/knowledge_base/controller.py +33 -5
  53. mindsdb/interfaces/knowledge_base/evaluate.py +53 -9
  54. mindsdb/interfaces/knowledge_base/executor.py +24 -0
  55. mindsdb/interfaces/knowledge_base/llm_client.py +3 -3
  56. mindsdb/interfaces/knowledge_base/preprocessing/document_preprocessor.py +21 -13
  57. mindsdb/interfaces/query_context/context_controller.py +100 -133
  58. mindsdb/interfaces/skills/skills_controller.py +18 -6
  59. mindsdb/interfaces/storage/db.py +40 -6
  60. mindsdb/interfaces/variables/variables_controller.py +8 -15
  61. mindsdb/utilities/config.py +3 -3
  62. mindsdb/utilities/functions.py +72 -60
  63. mindsdb/utilities/log.py +38 -6
  64. mindsdb/utilities/ps.py +7 -7
  65. {mindsdb-25.7.2.0.dist-info → mindsdb-25.7.4.0.dist-info}/METADATA +262 -263
  66. {mindsdb-25.7.2.0.dist-info → mindsdb-25.7.4.0.dist-info}/RECORD +69 -68
  67. {mindsdb-25.7.2.0.dist-info → mindsdb-25.7.4.0.dist-info}/WHEEL +0 -0
  68. {mindsdb-25.7.2.0.dist-info → mindsdb-25.7.4.0.dist-info}/licenses/LICENSE +0 -0
  69. {mindsdb-25.7.2.0.dist-info → mindsdb-25.7.4.0.dist-info}/top_level.txt +0 -0
mindsdb/integrations/handlers/langchain_handler/langchain_handler.py
@@ -13,7 +13,10 @@ import pandas as pd

 from mindsdb.interfaces.agents.safe_output_parser import SafeOutputParser
 from mindsdb.interfaces.agents.langchain_agent import (
-    get_llm_provider, get_embedding_model_provider, create_chat_model, get_chat_model_params
+    get_llm_provider,
+    get_embedding_model_provider,
+    create_chat_model,
+    get_chat_model_params,
 )

 from mindsdb.interfaces.agents.constants import (
@@ -24,19 +27,21 @@ from mindsdb.interfaces.agents.constants import (
     DEFAULT_MAX_TOKENS,
     DEFAULT_MODEL_NAME,
     USER_COLUMN,
-    ASSISTANT_COLUMN
+    ASSISTANT_COLUMN,
 )
 from mindsdb.integrations.utilities.rag.settings import DEFAULT_RAG_PROMPT_TEMPLATE
 from mindsdb.integrations.handlers.langchain_handler.tools import setup_tools
 from mindsdb.integrations.libs.base import BaseMLEngine
 from mindsdb.interfaces.storage.model_fs import HandlerStorage, ModelStorage
-from mindsdb.integrations.handlers.langchain_embedding_handler.langchain_embedding_handler import construct_model_from_args
-from mindsdb.integrations.handlers.openai_handler.constants import CHAT_MODELS  # noqa, for dependency checker
+from mindsdb.integrations.handlers.langchain_embedding_handler.langchain_embedding_handler import (
+    construct_model_from_args,
+)
+from mindsdb.integrations.handlers.openai_handler.constants import CHAT_MODELS  # noqa: F401 - for dependency checker

 from mindsdb.utilities import log
 from mindsdb.utilities.context_executor import ContextThreadPoolExecutor

-_PARSING_ERROR_PREFIXES = ['An output parsing error occured', 'Could not parse LLM output']
+_PARSING_ERROR_PREFIXES = ["An output parsing error occured", "Could not parse LLM output"]

 logger = log.getLogger(__name__)

@@ -58,13 +63,10 @@ class LangChainHandler(BaseMLEngine):
     - python_repl
     - serper.dev search
     """
-    name = 'langchain'

-    def __init__(
-            self,
-            model_storage: ModelStorage,
-            engine_storage: HandlerStorage,
-            **kwargs):
+    name = "langchain"
+
+    def __init__(self, model_storage: ModelStorage, engine_storage: HandlerStorage, **kwargs):
         super().__init__(model_storage, engine_storage, **kwargs)
         # if True, the target column name does not have to be specified at creation time.
         self.generative = True
@@ -81,77 +83,78 @@ class LangChainHandler(BaseMLEngine):
                 #
                 # Ideally, in the future, we would write a parser that is more robust and flexible than the one Langchain uses.
                 # Response is wrapped in ``
-                logger.info('Handling parsing error, salvaging response...')
-                response_output = response.split('`')
+                logger.info("Handling parsing error, salvaging response...")
+                response_output = response.split("`")
                 if len(response_output) >= 2:
                     response = response_output[-2]

                 # Wrap response in Langchain conversational react format.
-                langchain_react_formatted_response = f'''Thought: Do I need to use a tool? No
-AI: {response}'''
+                langchain_react_formatted_response = f"""Thought: Do I need to use a tool? No
+AI: {response}"""
                 return langchain_react_formatted_response
-        return f'Agent failed with error:\n{str(error)}...'
+        return f"Agent failed with error:\n{str(error)}..."

     def create(self, target: str, args: Dict = None, **kwargs):
-        self.default_agent_tools = args.get('tools', self.default_agent_tools)
-
-        args = args['using']
-        args['target'] = target
-        args['model_name'] = args.get('model_name', DEFAULT_MODEL_NAME)
-        args['provider'] = args.get('provider', get_llm_provider(args))
-        args['embedding_model_provider'] = args.get('embedding_model', get_embedding_model_provider(args))
-        if args.get('mode') == 'retrieval':
+        self.default_agent_tools = args.get("tools", self.default_agent_tools)
+
+        args = args["using"]
+        args["target"] = target
+        args["model_name"] = args.get("model_name", DEFAULT_MODEL_NAME)
+        args["provider"] = args.get("provider", get_llm_provider(args))
+        args["embedding_model_provider"] = args.get("embedding_model", get_embedding_model_provider(args))
+        if args.get("mode") == "retrieval":
             # use default prompt template for retrieval i.e. RAG if not provided
             if "prompt_template" not in args:
                 args["prompt_template"] = DEFAULT_RAG_PROMPT_TEMPLATE

-        self.model_storage.json_set('args', args)
+        self.model_storage.json_set("args", args)

     @staticmethod
     def create_validation(_, args: Dict = None, **kwargs):
-        if 'using' not in args:
+        if "using" not in args:
             raise Exception("LangChain engine requires a USING clause! Refer to its documentation for more details.")
         else:
-            args = args['using']
-        if 'prompt_template' not in args:
-            if not args.get('mode') == 'retrieval':
-                raise ValueError('Please provide a `prompt_template` for this engine.')
+            args = args["using"]
+        if "prompt_template" not in args:
+            if not args.get("mode") == "retrieval":
+                raise ValueError("Please provide a `prompt_template` for this engine.")

     def predict(self, df: pd.DataFrame, args: Dict = None) -> pd.DataFrame:
         """
         Dispatch is performed depending on the underlying model type. Currently, only the default text completion
         is supported.
         """
-        pred_args = args['predict_params'] if args else {}
-        args = self.model_storage.json_get('args')
-        if 'prompt_template' not in args and 'prompt_template' not in pred_args:
+        pred_args = args["predict_params"] if args else {}
+        args = self.model_storage.json_get("args")
+        if "prompt_template" not in args and "prompt_template" not in pred_args:
             raise ValueError("This model expects a `prompt_template`, please provide one.")
         # Back compatibility for old models
-        args['provider'] = args.get('provider', get_llm_provider(args))
-        args['embedding_model_provider'] = args.get('embedding_model', get_embedding_model_provider(args))
+        args["provider"] = args.get("provider", get_llm_provider(args))
+        args["embedding_model_provider"] = args.get("embedding_model", get_embedding_model_provider(args))

         df = df.reset_index(drop=True)

-        if pred_args.get('mode') == 'chat_model':
+        if pred_args.get("mode") == "chat_model":
             return self.call_llm(df, args, pred_args)

         agent = self.create_agent(df, args, pred_args)
         # Use last message as prompt, remove other questions.
-        user_column = args.get('user_column', USER_COLUMN)
+        user_column = args.get("user_column", USER_COLUMN)
         if user_column not in df.columns:
             raise Exception(
-                f"Expected user input in column `{user_column}`, which is not found in the input data. Either provide the column, or redefine the expected column at model creation (`USING user_column = 'value'`)")  # noqa
+                f"Expected user input in column `{user_column}`, which is not found in the input data. Either provide the column, or redefine the expected column at model creation (`USING user_column = 'value'`)"
+            )  # noqa
         df.iloc[:-1, df.columns.get_loc(user_column)] = None
         return self.run_agent(df, agent, args, pred_args)

     def call_llm(self, df, args=None, pred_args=None):
         llm = create_chat_model({**args, **pred_args})

-        user_column = args.get('user_column', USER_COLUMN)
-        assistant_column = args.get('assistant_column', ASSISTANT_COLUMN)
+        user_column = args.get("user_column", USER_COLUMN)
+        assistant_column = args.get("assistant_column", ASSISTANT_COLUMN)

         question = df[user_column].iloc[-1]
-        resp = llm([HumanMessage(question)], stop=['\nObservation:', '\n\tObservation:'])
+        resp = llm([HumanMessage(question)], stop=["\nObservation:", "\n\tObservation:"])

         return pd.DataFrame([resp.content], columns=[assistant_column])

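Most of the hunk above is quote normalization, but it also touches the parsing-error salvage logic, which is easy to misread in diff form. A minimal, self-contained sketch of that pattern follows; the sample error text is invented for illustration:

```python
# A standalone sketch of the salvage pattern shown above: recover the raw
# answer from a parsing-error message and re-wrap it in ReAct format.
_PARSING_ERROR_PREFIXES = ["An output parsing error occured", "Could not parse LLM output"]


def salvage_response(error: Exception) -> str:
    response = str(error)
    for prefix in _PARSING_ERROR_PREFIXES:
        if response.startswith(prefix):
            # The raw answer is usually wrapped in backticks; keep the text
            # between the last pair.
            parts = response.split("`")
            if len(parts) >= 2:
                response = parts[-2]
            # Re-wrap in the conversational ReAct format the agent expects.
            return f"Thought: Do I need to use a tool? No\nAI: {response}"
    return f"Agent failed with error:\n{error}..."


print(salvage_response(Exception("Could not parse LLM output: `The answer is 42`")))
# -> Thought: Do I need to use a tool? No
#    AI: The answer is 42
```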
@@ -162,25 +165,22 @@ AI: {response}'''
         model_kwargs = get_chat_model_params({**args, **pred_args})
         llm = create_chat_model({**args, **pred_args})

-        tools = setup_tools(llm,
-                            model_kwargs,
-                            pred_args,
-                            self.default_agent_tools)
+        tools = setup_tools(llm, model_kwargs, pred_args, self.default_agent_tools)

         # Prefer prediction prompt template over original if provided.
-        prompt_template = pred_args.get('prompt_template', args['prompt_template'])
-        if 'context' in pred_args:
-            prompt_template += '\n\n' + 'Useful information:\n' + pred_args['context'] + '\n'
+        prompt_template = pred_args.get("prompt_template", args["prompt_template"])
+        if "context" in pred_args:
+            prompt_template += "\n\n" + "Useful information:\n" + pred_args["context"] + "\n"

         # Set up memory.
-        memory = ConversationSummaryBufferMemory(llm=llm,
-                                                 max_token_limit=model_kwargs.get('max_tokens', DEFAULT_MAX_TOKENS),
-                                                 memory_key='chat_history')
+        memory = ConversationSummaryBufferMemory(
+            llm=llm, max_token_limit=model_kwargs.get("max_tokens", DEFAULT_MAX_TOKENS), memory_key="chat_history"
+        )
         memory.chat_memory.messages.insert(0, SystemMessage(content=prompt_template))
         # User - Assistant conversation. All except the last message.
-        user_column = args.get('user_column', USER_COLUMN)
-        assistant_column = args.get('assistant_column', ASSISTANT_COLUMN)
-        for row in df[:-1].to_dict('records'):
+        user_column = args.get("user_column", USER_COLUMN)
+        assistant_column = args.get("assistant_column", ASSISTANT_COLUMN)
+        for row in df[:-1].to_dict("records"):
             question = row[user_column]
             answer = row[assistant_column]
             if question:
@@ -188,45 +188,47 @@ AI: {response}'''
             if answer:
                 memory.chat_memory.add_ai_message(answer)

-        agent_type = args.get('agent_type', DEFAULT_AGENT_TYPE)
+        agent_type = args.get("agent_type", DEFAULT_AGENT_TYPE)
         agent_executor = initialize_agent(
             tools,
             llm,
             agent=agent_type,
             # Use custom output parser to handle flaky LLMs that don't ALWAYS conform to output format.
-            agent_kwargs={'output_parser': SafeOutputParser()},
+            agent_kwargs={"output_parser": SafeOutputParser()},
             # Calls the agent’s LLM Chain one final time to generate a final answer based on the previous steps
-            early_stopping_method='generate',
+            early_stopping_method="generate",
             handle_parsing_errors=self._handle_parsing_errors,
             # Timeout per agent invocation.
-            max_execution_time=pred_args.get('timeout_seconds', args.get('timeout_seconds', DEFAULT_AGENT_TIMEOUT_SECONDS)),
-            max_iterations=pred_args.get('max_iterations', args.get('max_iterations', DEFAULT_MAX_ITERATIONS)),
+            max_execution_time=pred_args.get(
+                "timeout_seconds", args.get("timeout_seconds", DEFAULT_AGENT_TIMEOUT_SECONDS)
+            ),
+            max_iterations=pred_args.get("max_iterations", args.get("max_iterations", DEFAULT_MAX_ITERATIONS)),
             memory=memory,
-            verbose=pred_args.get('verbose', args.get('verbose', True))
+            verbose=pred_args.get("verbose", args.get("verbose", True)),
         )
         return agent_executor

     def run_agent(self, df: pd.DataFrame, agent: AgentExecutor, args: Dict, pred_args: Dict) -> pd.DataFrame:
         # Prefer prediction time prompt template, if available.
-        base_template = pred_args.get('prompt_template', args['prompt_template'])
+        base_template = pred_args.get("prompt_template", args["prompt_template"])

         input_variables = []
         matches = list(re.finditer("{{(.*?)}}", base_template))

         for m in matches:
-            input_variables.append(m[0].replace('{', '').replace('}', ''))
+            input_variables.append(m[0].replace("{", "").replace("}", ""))
         empty_prompt_ids = np.where(df[input_variables].isna().all(axis=1).values)[0]

-        base_template = base_template.replace('{{', '{').replace('}}', '}')
+        base_template = base_template.replace("{{", "{").replace("}}", "}")
         prompts = []

-        user_column = args.get('user_column', USER_COLUMN)
+        user_column = args.get("user_column", USER_COLUMN)
         for i, row in df.iterrows():
             if i not in empty_prompt_ids:
                 prompt = PromptTemplate(input_variables=input_variables, template=base_template)
                 kwargs = {}
                 for col in input_variables:
-                    kwargs[col] = row[col] if row[col] is not None else ''  # add empty quote if data is missing
+                    kwargs[col] = row[col] if row[col] is not None else ""  # add empty quote if data is missing
                 prompts.append(prompt.format(**kwargs))
             elif row.get(user_column):
                 # Just add prompt
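The placeholder handling in run_agent above can be reproduced with nothing but the standard library: `{{name}}` markers become input variables, then the doubled braces are collapsed so ordinary formatting fills them in. A small sketch (the template text is illustrative):

```python
# Collect {{name}} placeholders, then collapse braces for str.format().
import re

base_template = "Answer about {{topic}} for user {{user}}."

input_variables = [m[1] for m in re.finditer(r"{{(.*?)}}", base_template)]
base_template = base_template.replace("{{", "{").replace("}}", "}")

print(input_variables)  # ['topic', 'user']
print(base_template.format(topic="databases", user="bob"))
# Answer about databases for user bob.
```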
@@ -234,32 +236,37 @@ AI: {response}'''

         def _invoke_agent_executor_with_prompt(agent_executor, prompt):
             if not prompt:
-                return ''
+                return ""
             try:
                 answer = agent_executor.invoke(prompt)
             except Exception as e:
                 answer = str(e)
                 if not answer.startswith("Could not parse LLM output: `"):
                     raise e
-                answer = {'output': answer.removeprefix("Could not parse LLM output: `").removesuffix("`")}
+                answer = {"output": answer.removeprefix("Could not parse LLM output: `").removesuffix("`")}

-            if 'output' not in answer:
+            if "output" not in answer:
                 # This should never happen unless Langchain changes invoke output format, but just in case.
                 return agent_executor.run(prompt)
-            return answer['output']
+            return answer["output"]

         completions = []
         # max_workers defaults to number of processors on the machine multiplied by 5.
         # https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
-        max_workers = args.get('max_workers', None)
-        agent_timeout_seconds = args.get('timeout', DEFAULT_AGENT_TIMEOUT_SECONDS)
+        max_workers = args.get("max_workers", None)
+        agent_timeout_seconds = args.get("timeout", DEFAULT_AGENT_TIMEOUT_SECONDS)
         executor = ContextThreadPoolExecutor(max_workers=max_workers)
         futures = [executor.submit(_invoke_agent_executor_with_prompt, agent, prompt) for prompt in prompts]
         try:
             for future in as_completed(futures, timeout=agent_timeout_seconds):
                 completions.append(future.result())
         except TimeoutError:
-            completions.append("I'm sorry! I couldn't come up with a response in time. Please try again.")
+            completions.append(
+                f"I'm sorry! I couldn't generate a response within the allotted time ({agent_timeout_seconds} seconds). "
+                "If you need more time for processing, you can adjust the timeout settings. "
+                "Please refer to the documentation for instructions on how to change the timeout value. "
+                "Feel free to try your request again."
+            )
         # Can't use ThreadPoolExecutor as context manager since we need wait=False.
         executor.shutdown(wait=False)

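The timeout handling above leans on two stdlib behaviors: `as_completed` raises `TimeoutError` when the deadline passes, and `shutdown(wait=False)` lets the caller move on without joining the stragglers (which is why a `with` block can't be used). A runnable sketch with a simulated slow agent call; the handler itself uses a context-propagating `ContextThreadPoolExecutor`:

```python
# Stdlib-only sketch of the submit / as_completed(timeout=...) / shutdown(wait=False) pattern.
import time
from concurrent.futures import ThreadPoolExecutor, TimeoutError, as_completed


def slow_agent_call(prompt: str) -> str:
    time.sleep(5)  # pretend the LLM is thinking
    return f"answer to: {prompt}"


timeout_seconds = 1
executor = ThreadPoolExecutor(max_workers=2)
futures = [executor.submit(slow_agent_call, p) for p in ["q1", "q2"]]
completions = []
try:
    for future in as_completed(futures, timeout=timeout_seconds):
        completions.append(future.result())
except TimeoutError:
    completions.append(f"Timed out after {timeout_seconds} seconds.")
# wait=False returns control immediately instead of blocking on running tasks.
executor.shutdown(wait=False)
print(completions)
```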
@@ -267,13 +274,13 @@ AI: {response}'''
         for i in sorted(empty_prompt_ids)[:-1]:
             completions.insert(i, None)

-        pred_df = pd.DataFrame(completions, columns=[args['target']])
+        pred_df = pd.DataFrame(completions, columns=[args["target"]])

         return pred_df

     def describe(self, attribute: Optional[str] = None) -> pd.DataFrame:
-        tables = ['info']
-        return pd.DataFrame(tables, columns=['tables'])
+        tables = ["info"]
+        return pd.DataFrame(tables, columns=["tables"])

     def finetune(self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> None:
-        raise NotImplementedError('Fine-tuning is not supported for LangChain models')
+        raise NotImplementedError("Fine-tuning is not supported for LangChain models")
mindsdb/integrations/handlers/lightwood_handler/requirements.txt
@@ -1,4 +1,4 @@
-lightwood>=25.5.2.2
-lightwood[extra]>=25.5.2.2
-lightwood[xai]>=25.5.2.2
-type_infer==0.0.22
+lightwood>=25.7.5.1
+lightwood[extra]>=25.7.5.1
+lightwood[xai]>=25.7.5.1
+type_infer==0.0.23
mindsdb/integrations/handlers/litellm_handler/litellm_handler.py
@@ -2,7 +2,8 @@ import ast
 from typing import Dict, Optional, List


-from litellm import completion, batch_completion, embedding, acompletion
+from litellm import completion, batch_completion, embedding, acompletion, supports_response_schema
+
 import pandas as pd

 from mindsdb.integrations.libs.base import BaseMLEngine
@@ -58,6 +59,15 @@ class LiteLLMHandler(BaseMLEngine):
     @classmethod
     def completion(cls, provider: str, model: str, messages: List[dict], args: dict):
         model, args = cls.prepare_arguments(provider, model, args)
+        json_output = args.pop("json_output", False)
+
+        supports_json_output = supports_response_schema(model=model, custom_llm_provider=provider)
+
+        if json_output and supports_json_output:
+            args["response_format"] = {"type": "json_object"}
+        else:
+            args["response_format"] = None
+
         return completion(model=model, messages=messages, stream=False, **args)

     def create(
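The new gating above asks litellm whether the provider/model pair honors a response format before requesting JSON mode, so the `json_output` flag degrades gracefully on models without it. A minimal sketch of the same flow, assuming `OPENAI_API_KEY` is set and using an illustrative model name:

```python
# Only request JSON mode when litellm reports the model supports it.
from litellm import completion, supports_response_schema

model, provider = "gpt-4o-mini", "openai"
json_output = True

kwargs = {}
if json_output and supports_response_schema(model=model, custom_llm_provider=provider):
    kwargs["response_format"] = {"type": "json_object"}

response = completion(
    model=model,
    messages=[{"role": "user", "content": 'Reply with a JSON object {"ok": true}'}],
    **kwargs,
)
print(response.choices[0].message.content)
```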
  def create(
@@ -77,6 +87,7 @@ class LiteLLMHandler(BaseMLEngine):
77
87
 
78
88
  # check engine_storage for api_key
79
89
  input_args.update({k: v for k, v in ml_engine_args.items()})
90
+ input_args["target"] = target
80
91
 
81
92
  # validate args
82
93
  export_args = CompletionParameters(**input_args).model_dump()
@@ -94,6 +105,8 @@ class LiteLLMHandler(BaseMLEngine):
         # validate args
         args = CompletionParameters(**input_args).model_dump()

+        target = args.pop("target")
+
         # build messages
         self._build_messages(args, df)

@@ -103,12 +116,12 @@ class LiteLLMHandler(BaseMLEngine):
         if len(args["messages"]) > 1:
             # if more than one message, use batch completion
             responses = batch_completion(**args)
-            return pd.DataFrame({"result": [response.choices[0].message.content for response in responses]})
+            return pd.DataFrame({target: [response.choices[0].message.content for response in responses]})

         # run completion
         response = completion(**args)

-        return pd.DataFrame({"result": [response.choices[0].message.content]})
+        return pd.DataFrame({target: [response.choices[0].message.content]})

     @staticmethod
     def _prompt_to_messages(prompt: str, **kwargs) -> List[Dict]:
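With the rename above, the output column now carries the model's target name instead of a hard-coded "result". A short sketch of the batch path (target and model names are illustrative; assumes `OPENAI_API_KEY` is set):

```python
# Name the output column after the model's target, as the handler now does.
import pandas as pd
from litellm import batch_completion

target = "answer"  # popped from the validated args in the handler
responses = batch_completion(
    model="gpt-4o-mini",
    messages=[
        [{"role": "user", "content": "Say hi"}],
        [{"role": "user", "content": "Say bye"}],
    ],
)
df = pd.DataFrame({target: [r.choices[0].message.content for r in responses]})
print(df)
```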
mindsdb/integrations/handlers/litellm_handler/settings.py
@@ -31,7 +31,8 @@ class CompletionParameters(BaseModel):
     # set api_base, api_version, api_key
     base_url: Optional[str] = None  # Base URL of the API.
     api_version: Optional[str] = None  # Version of the API to be used.
-    api_key: str  # API key for authentication.
+    api_key: Optional[str] = None  # API key for authentication.
+    target: Optional[str] = None  # the name of output column

     class Config:
         extra = Extra.forbid
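Making `api_key` optional means the key can be injected later from engine-level storage rather than required in the request, while `extra = Extra.forbid` still rejects unknown arguments. A toy sketch of the same v1-style pydantic config (field names are illustrative):

```python
# Optional fields pass validation unset; unknown fields are rejected.
from typing import Optional

from pydantic import BaseModel, Extra, ValidationError


class Params(BaseModel):
    model: str
    api_key: Optional[str] = None  # may now be filled in by the ML engine
    target: Optional[str] = None  # name of the output column

    class Config:
        extra = Extra.forbid


print(Params(model="gpt-4o"))  # valid without an api_key
try:
    Params(model="gpt-4o", api_keyy="x")  # typo in field name -> rejected
except ValidationError as exc:
    print("rejected:", exc.errors()[0]["type"])
```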
mindsdb/integrations/handlers/llama_index_handler/requirements.txt
@@ -1,4 +1,4 @@
-llama-index==0.12.28
+llama-index==0.12.41
 pydantic-settings >= 2.1.0
 llama-index-readers-web
 llama-index-embeddings-openai