ag2-0.3.2b2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (112)
  1. ag2-0.3.2b2.dist-info/LICENSE +201 -0
  2. ag2-0.3.2b2.dist-info/METADATA +490 -0
  3. ag2-0.3.2b2.dist-info/NOTICE.md +19 -0
  4. ag2-0.3.2b2.dist-info/RECORD +112 -0
  5. ag2-0.3.2b2.dist-info/WHEEL +5 -0
  6. ag2-0.3.2b2.dist-info/top_level.txt +1 -0
  7. autogen/__init__.py +17 -0
  8. autogen/_pydantic.py +116 -0
  9. autogen/agentchat/__init__.py +26 -0
  10. autogen/agentchat/agent.py +142 -0
  11. autogen/agentchat/assistant_agent.py +85 -0
  12. autogen/agentchat/chat.py +306 -0
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +785 -0
  15. autogen/agentchat/contrib/agent_optimizer.py +450 -0
  16. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  17. autogen/agentchat/contrib/capabilities/agent_capability.py +21 -0
  18. autogen/agentchat/contrib/capabilities/generate_images.py +297 -0
  19. autogen/agentchat/contrib/capabilities/teachability.py +406 -0
  20. autogen/agentchat/contrib/capabilities/text_compressors.py +72 -0
  21. autogen/agentchat/contrib/capabilities/transform_messages.py +92 -0
  22. autogen/agentchat/contrib/capabilities/transforms.py +565 -0
  23. autogen/agentchat/contrib/capabilities/transforms_util.py +120 -0
  24. autogen/agentchat/contrib/capabilities/vision_capability.py +217 -0
  25. autogen/agentchat/contrib/gpt_assistant_agent.py +545 -0
  26. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  27. autogen/agentchat/contrib/graph_rag/document.py +24 -0
  28. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +76 -0
  29. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +50 -0
  30. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +56 -0
  31. autogen/agentchat/contrib/img_utils.py +390 -0
  32. autogen/agentchat/contrib/llamaindex_conversable_agent.py +114 -0
  33. autogen/agentchat/contrib/llava_agent.py +176 -0
  34. autogen/agentchat/contrib/math_user_proxy_agent.py +471 -0
  35. autogen/agentchat/contrib/multimodal_conversable_agent.py +128 -0
  36. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
  37. autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
  38. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +701 -0
  39. autogen/agentchat/contrib/society_of_mind_agent.py +203 -0
  40. autogen/agentchat/contrib/text_analyzer_agent.py +76 -0
  41. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  42. autogen/agentchat/contrib/vectordb/base.py +243 -0
  43. autogen/agentchat/contrib/vectordb/chromadb.py +326 -0
  44. autogen/agentchat/contrib/vectordb/mongodb.py +559 -0
  45. autogen/agentchat/contrib/vectordb/pgvectordb.py +958 -0
  46. autogen/agentchat/contrib/vectordb/qdrant.py +334 -0
  47. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  48. autogen/agentchat/contrib/web_surfer.py +305 -0
  49. autogen/agentchat/conversable_agent.py +2904 -0
  50. autogen/agentchat/groupchat.py +1666 -0
  51. autogen/agentchat/user_proxy_agent.py +109 -0
  52. autogen/agentchat/utils.py +207 -0
  53. autogen/browser_utils.py +291 -0
  54. autogen/cache/__init__.py +10 -0
  55. autogen/cache/abstract_cache_base.py +78 -0
  56. autogen/cache/cache.py +182 -0
  57. autogen/cache/cache_factory.py +85 -0
  58. autogen/cache/cosmos_db_cache.py +150 -0
  59. autogen/cache/disk_cache.py +109 -0
  60. autogen/cache/in_memory_cache.py +61 -0
  61. autogen/cache/redis_cache.py +128 -0
  62. autogen/code_utils.py +745 -0
  63. autogen/coding/__init__.py +22 -0
  64. autogen/coding/base.py +113 -0
  65. autogen/coding/docker_commandline_code_executor.py +262 -0
  66. autogen/coding/factory.py +45 -0
  67. autogen/coding/func_with_reqs.py +203 -0
  68. autogen/coding/jupyter/__init__.py +22 -0
  69. autogen/coding/jupyter/base.py +32 -0
  70. autogen/coding/jupyter/docker_jupyter_server.py +164 -0
  71. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  72. autogen/coding/jupyter/jupyter_client.py +224 -0
  73. autogen/coding/jupyter/jupyter_code_executor.py +161 -0
  74. autogen/coding/jupyter/local_jupyter_server.py +168 -0
  75. autogen/coding/local_commandline_code_executor.py +410 -0
  76. autogen/coding/markdown_code_extractor.py +44 -0
  77. autogen/coding/utils.py +57 -0
  78. autogen/exception_utils.py +46 -0
  79. autogen/extensions/__init__.py +0 -0
  80. autogen/formatting_utils.py +76 -0
  81. autogen/function_utils.py +362 -0
  82. autogen/graph_utils.py +148 -0
  83. autogen/io/__init__.py +15 -0
  84. autogen/io/base.py +105 -0
  85. autogen/io/console.py +43 -0
  86. autogen/io/websockets.py +213 -0
  87. autogen/logger/__init__.py +11 -0
  88. autogen/logger/base_logger.py +140 -0
  89. autogen/logger/file_logger.py +287 -0
  90. autogen/logger/logger_factory.py +29 -0
  91. autogen/logger/logger_utils.py +42 -0
  92. autogen/logger/sqlite_logger.py +459 -0
  93. autogen/math_utils.py +356 -0
  94. autogen/oai/__init__.py +33 -0
  95. autogen/oai/anthropic.py +428 -0
  96. autogen/oai/bedrock.py +600 -0
  97. autogen/oai/cerebras.py +264 -0
  98. autogen/oai/client.py +1148 -0
  99. autogen/oai/client_utils.py +167 -0
  100. autogen/oai/cohere.py +453 -0
  101. autogen/oai/completion.py +1216 -0
  102. autogen/oai/gemini.py +469 -0
  103. autogen/oai/groq.py +281 -0
  104. autogen/oai/mistral.py +279 -0
  105. autogen/oai/ollama.py +576 -0
  106. autogen/oai/openai_utils.py +810 -0
  107. autogen/oai/together.py +343 -0
  108. autogen/retrieve_utils.py +487 -0
  109. autogen/runtime_logging.py +163 -0
  110. autogen/token_count_utils.py +257 -0
  111. autogen/types.py +20 -0
  112. autogen/version.py +7 -0
autogen/oai/ollama.py ADDED
@@ -0,0 +1,576 @@
+ """Create an OpenAI-compatible client using Ollama's API.
+
+ Example:
+     llm_config = {
+         "config_list": [{
+             "api_type": "ollama",
+             "model": "mistral:7b-instruct-v0.3-q6_K",
+         }]
+     }
+
+     agent = autogen.AssistantAgent("my_agent", llm_config=llm_config)
+
+ Install Ollama's Python library using: pip install --upgrade ollama
+ Install the fix-busted-json library using: pip install --upgrade fix-busted-json
+
+ Resources:
+ - https://github.com/ollama/ollama-python
+ """
+
+ from __future__ import annotations
+
+ import copy
+ import json
+ import random
+ import re
+ import time
+ import warnings
+ from typing import Any, Dict, List, Tuple
+
+ import ollama
+ from fix_busted_json import repair_json
+ from ollama import Client
+ from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
+ from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
+ from openai.types.completion_usage import CompletionUsage
+
+ from autogen.oai.client_utils import should_hide_tools, validate_parameter
+
+
+ class OllamaClient:
+     """Client for Ollama's API."""
+
+     # Defaults for manual tool calling.
+     # The instruction is added to the first system message and directs the model
+     # through a two-step process:
+     # 1. (before tools have been called) Return JSON with the functions to call
+     # 2. (directly after tools have been called) Return text describing the results of the function calls
+
+     # Override using the "manual_tool_call_instruction" config parameter
+     TOOL_CALL_MANUAL_INSTRUCTION = (
+         "You are to follow a strict two step process that will occur over "
+         "a number of interactions, so pay attention to what step you are in based on the full "
+         "conversation. We will be taking turns so only do one step at a time so don't perform step "
+         "2 until step 1 is complete and I've told you the result. The first step is to choose one "
+         "or more functions based on the request given and return only JSON with the functions and "
+         "arguments to use. The second step is to analyse the given output of the function and summarise "
+         "it returning only TEXT and not Python or JSON. "
+         "For argument values, be sure numbers aren't strings, they should not have double quotes around them. "
+         "In terms of your response format, for step 1 return only JSON and NO OTHER text, "
+         "for step 2 return only text and NO JSON/Python/Markdown. "
+         'The format for running a function is [{"name": "function_name1", "arguments":{"argument_name": "argument_value"}},{"name": "function_name2", "arguments":{"argument_name": "argument_value"}}] '
+         'Make sure the keys "name" and "arguments" are as described. '
+         "If you don't get the format correct, try again. "
+         "The following functions are available to you:[FUNCTIONS_LIST]"
+     )
+
+     # Appended to the last user message if no tools have been called
+     # Override using the "manual_tool_call_step1" config parameter
+     TOOL_CALL_MANUAL_STEP1 = " (proceed with step 1)"
+
+     # Appended to the user message after tools have been executed. Will create a 'user' message if one doesn't exist.
+     # Override using the "manual_tool_call_step2" config parameter
+     TOOL_CALL_MANUAL_STEP2 = " (proceed with step 2)"
+
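    # (Editorial illustration with a hypothetical function name; not part of the
    # released file.) Under the default instruction above, a step-1 reply from the
    # model is expected to be bare JSON such as:
    #     [{"name": "get_weather", "arguments": {"city": "Paris"}}]
    # response_to_tool_call() at the bottom of this module parses such a reply into
    # OpenAI-style tool calls; the step-2 reply should then be plain text only.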
+     def __init__(self, **kwargs):
+         """Note that no api_key or environment variable is required for Ollama.
+
+         Args:
+             None
+         """
+
+     def message_retrieval(self, response) -> List:
+         """
+         Retrieve and return a list of strings or a list of Choice.Message from the response.
+
+         NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
+         since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
+         """
+         return [choice.message for choice in response.choices]
+
+     def cost(self, response) -> float:
+         return response.cost
+
+     @staticmethod
+     def get_usage(response) -> Dict:
+         """Return usage summary of the response using RESPONSE_USAGE_KEYS."""
+         # ...  # pragma: no cover
+         return {
+             "prompt_tokens": response.usage.prompt_tokens,
+             "completion_tokens": response.usage.completion_tokens,
+             "total_tokens": response.usage.total_tokens,
+             "cost": response.cost,
+             "model": response.model,
+         }
+
+     def parse_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
+         """Loads the parameters for Ollama's API from the passed-in parameters and returns a validated set. Checks types, ranges, and sets defaults."""
+         ollama_params = {}
+
+         # Check that we have what we need to use Ollama's API
+         # https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
+
+         # The main parameters are model, prompt, stream, and options
+         # Options is a dictionary of parameters for the model
+         # There are other, advanced, parameters such as format, system (to override the system message), template, raw, etc. - not used here
+
+         # We won't enforce the available models
+         ollama_params["model"] = params.get("model", None)
+         assert ollama_params[
+             "model"
+         ], "Please specify the 'model' in your config list entry to nominate the Ollama model to use."
+
+         ollama_params["stream"] = validate_parameter(params, "stream", bool, True, False, None, None)
+
+         # Build up the options dictionary
+         # https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
+         options_dict = {}
+
+         if "num_predict" in params:
+             # Maximum number of tokens to predict; note: -1 is infinite, -2 is fill context, 128 is the default
+             options_dict["num_predict"] = validate_parameter(params, "num_predict", int, False, 128, None, None)
+
+         if "repeat_penalty" in params:
+             options_dict["repeat_penalty"] = validate_parameter(
+                 params, "repeat_penalty", (int, float), False, 1.1, None, None
+             )
+
+         if "seed" in params:
+             options_dict["seed"] = validate_parameter(params, "seed", int, False, 42, None, None)
+
+         if "temperature" in params:
+             options_dict["temperature"] = validate_parameter(
+                 params, "temperature", (int, float), False, 0.8, None, None
+             )
+
+         if "top_k" in params:
+             options_dict["top_k"] = validate_parameter(params, "top_k", int, False, 40, None, None)
+
+         if "top_p" in params:
+             options_dict["top_p"] = validate_parameter(params, "top_p", (int, float), False, 0.9, None, None)
+
+         if self._native_tool_calls and self._tools_in_conversation and not self._should_hide_tools:
+             ollama_params["tools"] = params["tools"]
+
+             # Ollama doesn't support streaming with tools natively
+             if ollama_params["stream"] and self._native_tool_calls:
+                 warnings.warn(
+                     "Streaming is not supported when using tools and 'Native' tool calling, streaming will be disabled.",
+                     UserWarning,
+                 )
+
+                 ollama_params["stream"] = False
+
+         if not self._native_tool_calls and self._tools_in_conversation:
+             # For manual tool calling we have injected the available tools into the prompt
+             # and we don't want to force JSON mode
+             ollama_params["format"] = ""  # Don't force JSON for manual tool calling mode
+
+         if len(options_dict) != 0:
+             ollama_params["options"] = options_dict
+
+         return ollama_params
+
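    # (Editorial illustration with hypothetical values; not part of the released
    # file.) For a config such as {"model": "llama3", "temperature": 0.5,
    # "num_predict": 256}, create() below ends up passing roughly this to chat():
    #     {"model": "llama3", "stream": False,
    #      "options": {"num_predict": 256, "temperature": 0.5},
    #      "messages": [...]}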
+     def create(self, params: Dict) -> ChatCompletion:
+
+         messages = params.get("messages", [])
+
+         # Are tools involved in this conversation?
+         self._tools_in_conversation = "tools" in params
+
+         # We provide second-level filtering of tools to avoid LLMs re-calling tools continuously
+         if self._tools_in_conversation:
+             hide_tools = validate_parameter(
+                 params, "hide_tools", str, False, "never", None, ["if_all_run", "if_any_run", "never"]
+             )
+             self._should_hide_tools = should_hide_tools(messages, params["tools"], hide_tools)
+         else:
+             self._should_hide_tools = False
+
+         # Are we using native Ollama tool calling? Otherwise we're doing manual tool calling.
+         # We allow the user to decide whether to use Ollama's tool calling
+         # or to have tool calling handled manually through text messages.
+         # Default is True = Ollama's tool calling
+         self._native_tool_calls = validate_parameter(params, "native_tool_calls", bool, False, True, None, None)
+
+         if not self._native_tool_calls:
+             # Load defaults
+             self._manual_tool_call_instruction = validate_parameter(
+                 params, "manual_tool_call_instruction", str, False, self.TOOL_CALL_MANUAL_INSTRUCTION, None, None
+             )
+             self._manual_tool_call_step1 = validate_parameter(
+                 params, "manual_tool_call_step1", str, False, self.TOOL_CALL_MANUAL_STEP1, None, None
+             )
+             self._manual_tool_call_step2 = validate_parameter(
+                 params, "manual_tool_call_step2", str, False, self.TOOL_CALL_MANUAL_STEP2, None, None
+             )
+
+         # Convert AutoGen messages to Ollama messages
+         ollama_messages = self.oai_messages_to_ollama_messages(
+             messages,
+             (
+                 params["tools"]
+                 if (not self._native_tool_calls and self._tools_in_conversation) and not self._should_hide_tools
+                 else None
+             ),
+         )
+
+         # Parse parameters to the Ollama API's parameters
+         ollama_params = self.parse_params(params)
+
+         ollama_params["messages"] = ollama_messages
+
+         # Token counts will be returned
+         prompt_tokens = 0
+         completion_tokens = 0
+         total_tokens = 0
+
+         ans = None
+         if "client_host" in params:
+             client = Client(host=params["client_host"])
+             response = client.chat(**ollama_params)
+         else:
+             response = ollama.chat(**ollama_params)
+
+         if ollama_params["stream"]:
+             # Read in the chunks as they stream, taking in tool_calls which may be across
+             # multiple chunks if more than one is suggested
+             ans = ""
+             for chunk in response:
+                 ans = ans + (chunk["message"]["content"] or "")
+
+                 if "done_reason" in chunk:
+                     prompt_tokens = chunk["prompt_eval_count"] if "prompt_eval_count" in chunk else 0
+                     completion_tokens = chunk["eval_count"] if "eval_count" in chunk else 0
+                     total_tokens = prompt_tokens + completion_tokens
+         else:
+             # Non-streaming finished
+             ans: str = response["message"]["content"]
+
+             prompt_tokens = response["prompt_eval_count"] if "prompt_eval_count" in response else 0
+             completion_tokens = response["eval_count"] if "eval_count" in response else 0
+             total_tokens = prompt_tokens + completion_tokens
+
+         if response is not None:
+
+             # Defaults
+             ollama_finish = "stop"
+             tool_calls = None
+
+             # Id and streaming text into response
+             if ollama_params["stream"]:
+                 response_content = ans
+                 response_id = chunk["created_at"]
+             else:
+                 response_content = response["message"]["content"]
+                 response_id = response["created_at"]
+
+             # Process tools in the response
+             if self._tools_in_conversation:
+
+                 if self._native_tool_calls:
+
+                     if not ollama_params["stream"]:
+                         response_content = response["message"]["content"]
+
+                         # Native tool calling
+                         if "tool_calls" in response["message"]:
+                             ollama_finish = "tool_calls"
+                             tool_calls = []
+                             random_id = random.randint(0, 10000)
+                             for tool_call in response["message"]["tool_calls"]:
+                                 tool_calls.append(
+                                     ChatCompletionMessageToolCall(
+                                         id="ollama_func_{}".format(random_id),
+                                         function={
+                                             "name": tool_call["function"]["name"],
+                                             "arguments": json.dumps(tool_call["function"]["arguments"]),
+                                         },
+                                         type="function",
+                                     )
+                                 )
+
+                                 random_id += 1
+
+                 elif not self._native_tool_calls:
+
+                     # Try to convert the response to a tool call object
+                     response_toolcalls = response_to_tool_call(ans)
+
+                     # If we can, then we've got tool call(s)
+                     if response_toolcalls is not None:
+                         ollama_finish = "tool_calls"
+                         tool_calls = []
+                         random_id = random.randint(0, 10000)
+
+                         for json_function in response_toolcalls:
+                             tool_calls.append(
+                                 ChatCompletionMessageToolCall(
+                                     id="ollama_manual_func_{}".format(random_id),
+                                     function={
+                                         "name": json_function["name"],
+                                         "arguments": (
+                                             json.dumps(json_function["arguments"])
+                                             if "arguments" in json_function
+                                             else "{}"
+                                         ),
+                                     },
+                                     type="function",
+                                 )
+                             )
+
+                             random_id += 1
+
+                         # Blank the message content
+                         response_content = ""
+
+         else:
+             raise RuntimeError("Failed to get response from Ollama.")
+
+         # Convert response to AutoGen response
+         message = ChatCompletionMessage(
+             role="assistant",
+             content=response_content,
+             function_call=None,
+             tool_calls=tool_calls,
+         )
+         choices = [Choice(finish_reason=ollama_finish, index=0, message=message)]
+
+         response_oai = ChatCompletion(
+             id=response_id,
+             model=ollama_params["model"],
+             created=int(time.time()),
+             object="chat.completion",
+             choices=choices,
+             usage=CompletionUsage(
+                 prompt_tokens=prompt_tokens,
+                 completion_tokens=completion_tokens,
+                 total_tokens=total_tokens,
+             ),
+             cost=0,  # Local models, FREE!
+         )
+
+         return response_oai
+
+     def oai_messages_to_ollama_messages(self, messages: list[Dict[str, Any]], tools: list) -> list[dict[str, Any]]:
+         """Convert messages from OAI format to Ollama's format.
+         We correct for any specific role orders and types, and convert tools to messages (as Ollama can't use tool messages).
+         """
+
+         ollama_messages = copy.deepcopy(messages)
+
+         # Remove the name field
+         for message in ollama_messages:
+             if "name" in message:
+                 message.pop("name", None)
+
+         # Having a 'system' message at the end does not work well with Ollama, so we change it to 'user'
+         # 'system' messages at the end are typical of the summarisation message: summary_method="reflection_with_llm"
+         if len(ollama_messages) > 1 and ollama_messages[-1]["role"] == "system":
+             ollama_messages[-1]["role"] = "user"
+
+         # Process messages for tool calling manually
+         if tools is not None and not self._native_tool_calls:
+             # 1. We need to append instructions to the starting system message on function calling
+             # 2. If we have not yet called tools we append the "step 1 instruction" to the latest user message
+             # 3. If we have already called tools we append the "step 2 instruction" to the latest user message
+
+             have_tool_calls = False
+             have_tool_results = False
+             last_tool_result_index = -1
+
+             for i, message in enumerate(ollama_messages):
+                 if "tool_calls" in message:
+                     have_tool_calls = True
+                 if "tool_call_id" in message:
+                     have_tool_results = True
+                     last_tool_result_index = i
+
+             tool_result_is_last_msg = have_tool_results and last_tool_result_index == len(ollama_messages) - 1
+
+             if ollama_messages[0]["role"] == "system":
+                 manual_instruction = self._manual_tool_call_instruction
+
+                 # Build a string of the functions available
+                 functions_string = ""
+                 for function in tools:
+                     functions_string += f"""\n{function}\n"""
+
+                 # Replace single quotes with double quotes - not sure why this helps the LLM perform
+                 # better, but it seems to. Monitor and remove if not necessary.
+                 functions_string = functions_string.replace("'", '"')
+
+                 manual_instruction = manual_instruction.replace("[FUNCTIONS_LIST]", functions_string)
+
+                 # Update the system message with the instructions and functions
+                 ollama_messages[0]["content"] = ollama_messages[0]["content"] + manual_instruction.rstrip()
+
+             # If we are still in the function calling or evaluating process, append the steps instruction
+             if not have_tool_calls or tool_result_is_last_msg:
+                 if ollama_messages[0]["role"] == "system":
+                     # NOTE: we require a system message to exist for the manual steps texts
+                     # Append the manual step instructions
+                     content_to_append = (
+                         self._manual_tool_call_step1 if not have_tool_results else self._manual_tool_call_step2
+                     )
+
+                     if content_to_append != "":
+                         # Append the relevant tool call instruction to the latest user message
+                         if ollama_messages[-1]["role"] == "user":
+                             ollama_messages[-1]["content"] = ollama_messages[-1]["content"] + content_to_append
+                         else:
+                             ollama_messages.append({"role": "user", "content": content_to_append})
+
+         # Convert tool call and tool result messages to normal text messages for Ollama
+         for i, message in enumerate(ollama_messages):
+             if "tool_calls" in message:
+                 # Recommended tool calls
+                 content = "Run the following function(s):"
+                 for tool_call in message["tool_calls"]:
+                     content = content + "\n" + str(tool_call)
+                 ollama_messages[i] = {"role": "assistant", "content": content}
+             if "tool_call_id" in message:
+                 # Executed tool results
+                 message["result"] = message["content"]
+                 del message["content"]
+                 del message["role"]
+                 content = "The following function was run: " + str(message)
+                 ollama_messages[i] = {"role": "user", "content": content}
+
+         # As we are changing messages, merge any two user messages on the end where the last one is the tool call step instruction
+         if (
+             len(ollama_messages) >= 2
+             and not self._native_tool_calls
+             and ollama_messages[-2]["role"] == "user"
+             and ollama_messages[-1]["role"] == "user"
+             and (
+                 ollama_messages[-1]["content"] == self._manual_tool_call_step1
+                 or ollama_messages[-1]["content"] == self._manual_tool_call_step2
+             )
+         ):
+             ollama_messages[-2]["content"] = ollama_messages[-2]["content"] + ollama_messages[-1]["content"]
+             del ollama_messages[-1]
+
+         # Ensure the last message is a user / system message; if not, add a user message
+         if ollama_messages[-1]["role"] != "user" and ollama_messages[-1]["role"] != "system":
+             ollama_messages.append({"role": "user", "content": "Please continue."})
+
+         return ollama_messages
+
+
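# (Editorial illustration with hypothetical messages; not part of the released
# file.) oai_messages_to_ollama_messages() rewrites tool traffic as plain text,
# e.g. an executed tool result
#     {"role": "tool", "tool_call_id": "ollama_manual_func_42", "content": "22"}
# becomes
#     {"role": "user", "content": "The following function was run: {'tool_call_id': 'ollama_manual_func_42', 'result': '22'}"}
# and an assistant tool_calls message becomes "Run the following function(s): ...".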
+ def response_to_tool_call(response_string: str) -> Any:
+     """Attempts to convert the response to an object, aiming to align with the function format [{},{}]"""
+
+     # We try to detect the list[dict] format:
+     # Pattern 1 is [{},{}]
+     # Pattern 2 is {} (without the [], so it could be a single function call)
+     patterns = [r"\[[\s\S]*?\]", r"\{[\s\S]*\}"]
+
+     for i, pattern in enumerate(patterns):
+         # Search for the pattern in the input string
+         matches = re.findall(pattern, response_string.strip())
+
+         for match in matches:
+
+             # It has matched, extract it and load it
+             json_str = match.strip()
+             data_object = None
+
+             try:
+                 # Attempt to convert it as-is
+                 data_object = json.loads(json_str)
+             except Exception:
+                 try:
+                     # If that fails, attempt to repair it
+
+                     if i == 0:
+                         # Enclose in a JSON object for repairing, which is restored upon fix
+                         fixed_json = repair_json("{'temp':" + json_str + "}")
+                         data_object = json.loads(fixed_json)
+                         data_object = data_object["temp"]
+                     else:
+                         fixed_json = repair_json(json_str)
+                         data_object = json.loads(fixed_json)
+                 except json.JSONDecodeError as e:
+                     if e.msg == "Invalid \\escape":
+                         # Handle Mistral/Mixtral trying to escape underscores with \\
+                         try:
+                             json_str = json_str.replace("\\_", "_")
+                             if i == 0:
+                                 fixed_json = repair_json("{'temp':" + json_str + "}")
+                                 data_object = json.loads(fixed_json)
+                                 data_object = data_object["temp"]
+                             else:
+                                 fixed_json = repair_json(json_str)
+                                 data_object = json.loads(fixed_json)
+                         except Exception:
+                             pass
+                 except Exception:
+                     pass
+
+             if data_object is not None:
+                 data_object = _object_to_tool_call(data_object)
+
+                 if data_object is not None:
+                     return data_object
+
+     # There's no tool call in the response
+     return None
+
+
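# (Editorial illustration with a hypothetical reply; not part of the released
# file.) A single-quoted reply such as
#     response_to_tool_call("[{'name': 'add', 'arguments': {'a': 1, 'b': 2}}]")
# fails strict json.loads() but is recovered by repair_json(), returning
#     [{"name": "add", "arguments": {"a": 1, "b": 2}}]
# A reply containing no recognisable JSON returns None.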
+ def _object_to_tool_call(data_object: Any) -> List[Dict]:
+     """Attempts to convert an object to a valid tool call object List[Dict] and returns it, if it can, otherwise None"""
+
+     # If it's a dictionary and not a list, then wrap it in a list
+     if isinstance(data_object, dict):
+         data_object = [data_object]
+
+     # Validate that the data is a list of dictionaries
+     if isinstance(data_object, list) and all(isinstance(item, dict) for item in data_object):
+         # Perfect format, a list of dictionaries
+
+         # Check that each dictionary has at least 'name', optionally 'arguments', and no other keys
+         is_invalid = False
+         for item in data_object:
+             if not is_valid_tool_call_item(item):
+                 is_invalid = True
+                 break
+
+         # All passed; name and (optionally) arguments exist for all entries
+         if not is_invalid:
+             return data_object
+     elif isinstance(data_object, list):
+         # If it's a list but the items are not dictionaries, check if they are strings that can be converted to dictionaries
+         data_copy = data_object.copy()
+         is_invalid = False
+         for i, item in enumerate(data_copy):
+             try:
+                 new_item = eval(item)
+                 if isinstance(new_item, dict):
+                     if is_valid_tool_call_item(new_item):
+                         data_object[i] = new_item
+                     else:
+                         is_invalid = True
+                         break
+                 else:
+                     is_invalid = True
+                     break
+             except Exception:
+                 is_invalid = True
+                 break
+
+         if not is_invalid:
+             return data_object
+
+     return None
+
+
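# (Editorial illustration with hypothetical inputs; not part of the released file.)
#     _object_to_tool_call({"name": "f"})                    -> [{"name": "f"}]
#     _object_to_tool_call([{"name": "f", "arguments": {}}]) -> the list, unchanged
#     _object_to_tool_call([{"name": "f", "extra": 1}])      -> None (extra key)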
+ def is_valid_tool_call_item(call_item: dict) -> bool:
+     """Check that a dictionary item has at least 'name', optionally 'arguments', and no other keys, to match a tool call JSON"""
+     if "name" not in call_item or not isinstance(call_item["name"], str):
+         return False
+
+     if set(call_item.keys()) - {"name", "arguments"}:
+         return False
+
+     return True
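
For context, a minimal end-to-end sketch of driving this client through AG2 (a hedged illustration, not taken from the package: it assumes a local Ollama server is running and that the named model has been pulled; the model name and prompt are placeholders):

    import autogen

    llm_config = {
        "config_list": [{
            "api_type": "ollama",
            "model": "llama3",  # assumed to be pulled locally, e.g. via: ollama pull llama3
        }]
    }

    assistant = autogen.AssistantAgent("assistant", llm_config=llm_config)
    user = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
    user.initiate_chat(assistant, message="Write a haiku about local LLMs.", max_turns=2)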