ag2 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (160)
  1. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/METADATA +5 -146
  2. ag2-0.5.0.dist-info/RECORD +6 -0
  3. ag2-0.5.0.dist-info/top_level.txt +1 -0
  4. ag2-0.4.1.dist-info/RECORD +0 -158
  5. ag2-0.4.1.dist-info/top_level.txt +0 -1
  6. autogen/__init__.py +0 -17
  7. autogen/_pydantic.py +0 -116
  8. autogen/agentchat/__init__.py +0 -42
  9. autogen/agentchat/agent.py +0 -142
  10. autogen/agentchat/assistant_agent.py +0 -85
  11. autogen/agentchat/chat.py +0 -306
  12. autogen/agentchat/contrib/__init__.py +0 -0
  13. autogen/agentchat/contrib/agent_builder.py +0 -788
  14. autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
  15. autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
  16. autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
  17. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
  18. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
  19. autogen/agentchat/contrib/agent_eval/task.py +0 -43
  20. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  21. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  22. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  23. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  24. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  25. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  26. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  27. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  28. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  29. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  30. autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
  31. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
  32. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
  33. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
  34. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
  35. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
  36. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
  37. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
  38. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
  39. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
  40. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
  41. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
  42. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
  43. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
  44. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
  45. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
  49. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
  50. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
  51. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
  52. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
  53. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
  54. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
  55. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
  56. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
  57. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
  58. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
  59. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
  60. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
  61. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
  62. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
  63. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
  64. autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
  65. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
  66. autogen/agentchat/contrib/captainagent.py +0 -490
  67. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  68. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  69. autogen/agentchat/contrib/graph_rag/document.py +0 -30
  70. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
  71. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
  72. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
  73. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
  74. autogen/agentchat/contrib/img_utils.py +0 -390
  75. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  76. autogen/agentchat/contrib/llava_agent.py +0 -176
  77. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  78. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  79. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  80. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  81. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
  82. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  83. autogen/agentchat/contrib/swarm_agent.py +0 -463
  84. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  85. autogen/agentchat/contrib/tool_retriever.py +0 -120
  86. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  87. autogen/agentchat/contrib/vectordb/base.py +0 -243
  88. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  89. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  90. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  91. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  92. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  93. autogen/agentchat/contrib/web_surfer.py +0 -305
  94. autogen/agentchat/conversable_agent.py +0 -2908
  95. autogen/agentchat/groupchat.py +0 -1668
  96. autogen/agentchat/user_proxy_agent.py +0 -109
  97. autogen/agentchat/utils.py +0 -207
  98. autogen/browser_utils.py +0 -291
  99. autogen/cache/__init__.py +0 -10
  100. autogen/cache/abstract_cache_base.py +0 -78
  101. autogen/cache/cache.py +0 -182
  102. autogen/cache/cache_factory.py +0 -85
  103. autogen/cache/cosmos_db_cache.py +0 -150
  104. autogen/cache/disk_cache.py +0 -109
  105. autogen/cache/in_memory_cache.py +0 -61
  106. autogen/cache/redis_cache.py +0 -128
  107. autogen/code_utils.py +0 -745
  108. autogen/coding/__init__.py +0 -22
  109. autogen/coding/base.py +0 -113
  110. autogen/coding/docker_commandline_code_executor.py +0 -262
  111. autogen/coding/factory.py +0 -45
  112. autogen/coding/func_with_reqs.py +0 -203
  113. autogen/coding/jupyter/__init__.py +0 -22
  114. autogen/coding/jupyter/base.py +0 -32
  115. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  116. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  117. autogen/coding/jupyter/jupyter_client.py +0 -224
  118. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  119. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  120. autogen/coding/local_commandline_code_executor.py +0 -410
  121. autogen/coding/markdown_code_extractor.py +0 -44
  122. autogen/coding/utils.py +0 -57
  123. autogen/exception_utils.py +0 -46
  124. autogen/extensions/__init__.py +0 -0
  125. autogen/formatting_utils.py +0 -76
  126. autogen/function_utils.py +0 -362
  127. autogen/graph_utils.py +0 -148
  128. autogen/io/__init__.py +0 -15
  129. autogen/io/base.py +0 -105
  130. autogen/io/console.py +0 -43
  131. autogen/io/websockets.py +0 -213
  132. autogen/logger/__init__.py +0 -11
  133. autogen/logger/base_logger.py +0 -140
  134. autogen/logger/file_logger.py +0 -287
  135. autogen/logger/logger_factory.py +0 -29
  136. autogen/logger/logger_utils.py +0 -42
  137. autogen/logger/sqlite_logger.py +0 -459
  138. autogen/math_utils.py +0 -356
  139. autogen/oai/__init__.py +0 -33
  140. autogen/oai/anthropic.py +0 -428
  141. autogen/oai/bedrock.py +0 -606
  142. autogen/oai/cerebras.py +0 -270
  143. autogen/oai/client.py +0 -1148
  144. autogen/oai/client_utils.py +0 -167
  145. autogen/oai/cohere.py +0 -453
  146. autogen/oai/completion.py +0 -1216
  147. autogen/oai/gemini.py +0 -469
  148. autogen/oai/groq.py +0 -281
  149. autogen/oai/mistral.py +0 -279
  150. autogen/oai/ollama.py +0 -582
  151. autogen/oai/openai_utils.py +0 -811
  152. autogen/oai/together.py +0 -343
  153. autogen/retrieve_utils.py +0 -487
  154. autogen/runtime_logging.py +0 -163
  155. autogen/token_count_utils.py +0 -259
  156. autogen/types.py +0 -20
  157. autogen/version.py +0 -7
  158. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/LICENSE +0 -0
  159. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/NOTICE.md +0 -0
  160. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/WHEEL +0 -0
autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -1,471 +0,0 @@
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
- #
- # SPDX-License-Identifier: Apache-2.0
- #
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
- # SPDX-License-Identifier: MIT
- import os
- import re
- from time import sleep
- from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
-
- from pydantic import BaseModel, Extra, root_validator
-
- from autogen._pydantic import PYDANTIC_V1
- from autogen.agentchat import Agent, UserProxyAgent
- from autogen.code_utils import UNKNOWN, execute_code, extract_code, infer_lang
- from autogen.math_utils import get_answer
-
- PROMPTS = {
- # default
- "default": """Let's use Python to solve a math problem.
-
- Query requirements:
- You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.
- You can use packages like sympy to help you.
- You must follow the formats below to write your code:
- ```python
- # your code
- ```
-
- First state the key idea to solve the problem. You may choose from three ways to solve the problem:
- Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.
- Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.
- Case 3: If the problem cannot be handled in the above two ways, please follow this process:
- 1. Solve the problem step by step (do not over-divide the steps).
- 2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).
- 3. Wait for me to give the results.
- 4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
-
- After all the queries are run and you get the answer, put the answer in \\boxed{}.
-
- Problem:
- """,
- # select python or wolfram
- "two_tools": """Let's use two tools (Python and Wolfram alpha) to solve a math problem.
-
- Query requirements:
- You must follow the formats below to write your query:
- For Wolfram Alpha:
- ```wolfram
- # one wolfram query
- ```
- For Python:
- ```python
- # your code
- ```
- When using Python, you should always use the 'print' function for the output and use fractions/radical forms instead of decimals. You can use packages like sympy to help you.
- When using wolfram, give one query in each code block.
-
- Please follow this process:
- 1. Solve the problem step by step (do not over-divide the steps).
- 2. Take out any queries that can be asked through Python or Wolfram Alpha, select the most suitable tool to be used (for example, any calculations or equations that can be calculated).
- 3. Wait for me to give the results.
- 4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
-
- After all the queries are run and you get the answer, put the final answer in \\boxed{}.
-
- Problem: """,
- # use python step by step
- "python": """Let's use Python to solve a math problem.
-
- Query requirements:
- You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.
- You can use packages like sympy to help you.
- You must follow the formats below to write your code:
- ```python
- # your code
- ```
-
- Please follow this process:
- 1. Solve the problem step by step (do not over-divide the steps).
- 2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).
- 3. Wait for me to give the results.
- 4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.
-
- After all the queries are run and you get the answer, put the answer in \\boxed{}.
-
- Problem: """,
- }
-
-
- def _is_termination_msg_mathchat(message):
- """Check if a message is a termination message."""
- if isinstance(message, dict):
- message = message.get("content")
- if message is None:
- return False
- cb = extract_code(message)
- contain_code = False
- for c in cb:
- if c[0] == "python" or c[0] == "wolfram":
- contain_code = True
- break
- return not contain_code and get_answer(message) is not None and get_answer(message) != ""
-
-
- def _add_print_to_last_line(code):
- """Add print() to the last line of a string."""
- # 1. check if there is already a print statement
- if "print(" in code:
- return code
- # 2. extract the last line, enclose it in print() and return the new string
- lines = code.splitlines()
- last_line = lines[-1]
- if "\t" in last_line or "=" in last_line:
- return code
- if "=" in last_line:
- last_line = "print(" + last_line.split(" = ")[0] + ")"
- lines.append(last_line)
- else:
- lines[-1] = "print(" + last_line + ")"
- # 3. join the lines back together
- return "\n".join(lines)
-
-
- def _remove_print(code):
- """remove all print statements from a string."""
- lines = code.splitlines()
- lines = [line for line in lines if not line.startswith("print(")]
- return "\n".join(lines)
-
-
- class MathUserProxyAgent(UserProxyAgent):
- """(Experimental) A MathChat agent that can handle math problems."""
-
- MAX_CONSECUTIVE_AUTO_REPLY = 15 # maximum number of consecutive auto replies (subject to future change)
- DEFAULT_REPLY = "Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\boxed{}.)"
-
- def __init__(
- self,
- name: Optional[str] = "MathChatAgent", # default set to MathChatAgent
- is_termination_msg: Optional[
- Callable[[Dict], bool]
- ] = _is_termination_msg_mathchat, # terminate if \boxed{} in message
- human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER", # Fully automated
- default_auto_reply: Optional[Union[str, Dict, None]] = DEFAULT_REPLY,
- max_invalid_q_per_step=3, # a parameter needed in MathChat
- **kwargs,
- ):
- """
- Args:
- name (str): name of the agent
- is_termination_msg (function): a function that takes a message in the form of a dictionary and returns a boolean value indicating if this received message is a termination message.
- The dict can contain the following keys: "content", "role", "name", "function_call".
- human_input_mode (str): whether to ask for human inputs every time a message is received.
- Possible values are "ALWAYS", "TERMINATE", "NEVER".
- (1) When "ALWAYS", the agent prompts for human input every time a message is received.
- Under this mode, the conversation stops when the human input is "exit",
- or when is_termination_msg is True and there is no human input.
- (2) When "TERMINATE", the agent only prompts for human input only when a termination message is received or
- the number of auto reply reaches the max_consecutive_auto_reply.
- (3) (Default) When "NEVER", the agent will never prompt for human input. Under this mode, the conversation stops
- when the number of auto reply reaches the max_consecutive_auto_reply or when is_termination_msg is True.
- default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
- max_invalid_q_per_step (int): (ADDED) the maximum number of invalid queries per step.
- **kwargs (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
- """
- super().__init__(
- name=name,
- is_termination_msg=is_termination_msg,
- human_input_mode=human_input_mode,
- default_auto_reply=default_auto_reply,
- **kwargs,
- )
- self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, position=2)
- # fixed var
- self._max_invalid_q_per_step = max_invalid_q_per_step
-
- # mutable
- self._valid_q_count = 0
- self._total_q_count = 0
- self._accum_invalid_q_per_step = 0
- self._previous_code = ""
- self.last_reply = None
-
- @staticmethod
- def message_generator(sender, recipient, context):
- """Generate a prompt for the assistant agent with the given problem and prompt.
-
- Args:
- sender (Agent): the sender of the message.
- recipient (Agent): the recipient of the message.
- context (dict): a dictionary with the following fields:
- problem (str): the problem to be solved.
- prompt_type (str, Optional): the type of the prompt. Possible values are "default", "python", "wolfram".
- (1) "default": the prompt that allows the agent to choose between 3 ways to solve a problem:
- 1. write a python program to solve it directly.
- 2. solve it directly without python.
- 3. solve it step by step with python.
- (2) "python":
- a simplified prompt from the third way of the "default" prompt, that asks the assistant
- to solve the problem step by step with python.
- (3) "two_tools":
- a simplified prompt similar to the "python" prompt, but allows the model to choose between
- Python and Wolfram Alpha to solve the problem.
- customized_prompt (str, Optional): a customized prompt to be used. If it is not None, the prompt_type will be ignored.
-
- Returns:
- str: the generated prompt ready to be sent to the assistant agent.
- """
- sender._reset()
- problem = context.get("problem")
- prompt_type = context.get("prompt_type", "default")
- customized_prompt = context.get("customized_prompt", None)
- if customized_prompt is not None:
- return customized_prompt + problem
- return PROMPTS[prompt_type] + problem
-
- def _reset(self):
- # super().reset()
- self._valid_q_count = 0
- self._total_q_count = 0
- self._accum_invalid_q_per_step = 0
- self._previous_code = ""
- self.last_reply = None
-
- def execute_one_python_code(self, pycode):
- """Execute python code blocks.
-
- Previous python code will be saved and executed together with the new code.
- the "print" function will also be added to the last line of the code if needed
- """
- # Need to replace all "; " with "\n" to avoid syntax error when adding `print` to the last line
- pycode = pycode.replace("; ", "\n").replace(";", "\n")
- pycode = self._previous_code + _add_print_to_last_line(pycode)
-
- return_code, output, _ = execute_code(pycode, **self._code_execution_config, timeout=5)
- is_success = return_code == 0
-
- if not is_success:
- # Remove the file information from the error string
- pattern = r'File "/[^"]+\.py", line \d+, in .+\n'
- if isinstance(output, str):
- output = re.sub(pattern, "", output)
- output = "Error: " + output
- elif output == "":
- # Check if there is any print statement
- if "print" not in pycode:
- output = "No output found. Make sure you print the results."
- is_success = False
- else:
- output = "No output found."
- is_success = True
-
- if len(output) > 2000:
- output = "Your requested query response is too long. You might have made a mistake. Please revise your reasoning and query."
- is_success = False
-
- if is_success:
- # remove print and check if it still works
- tmp = self._previous_code + "\n" + _remove_print(pycode) + "\n"
- rcode, _, _ = execute_code(tmp, **self._code_execution_config)
- else:
- # only add imports and check if it works
- tmp = self._previous_code + "\n"
- for line in pycode.split("\n"):
- if "import" in line:
- tmp += line + "\n"
- rcode, _, _ = execute_code(tmp, **self._code_execution_config)
-
- if rcode == 0:
- self._previous_code = tmp
- return output, is_success
-
- def execute_one_wolfram_query(self, query: str):
- """Run one wolfram query and return the output.
-
- Args:
- query: string of the query.
-
- Returns:
- output: string with the output of the query.
- is_success: boolean indicating whether the query was successful.
- """
- # wolfram query handler
- wolfram = WolframAlphaAPIWrapper()
- output, is_success = wolfram.run(query)
- if output == "":
- output = "Error: The wolfram query is invalid."
- is_success = False
- return output, is_success
-
- def _generate_math_reply(
- self,
- messages: Optional[List[Dict]] = None,
- sender: Optional[Agent] = None,
- config: Optional[Any] = None,
- ):
- """Generate an auto reply."""
- if messages is None:
- messages = self._oai_messages[sender]
- message = messages[-1]
- message = message.get("content", "")
- code_blocks = extract_code(message)
-
- if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
- # no code block is found, lang should be `UNKNOWN``
- return True, self._default_auto_reply
- is_success, all_success = True, True
- reply = ""
- for code_block in code_blocks:
- lang, code = code_block
- if not lang:
- lang = infer_lang(code)
- if lang == "python":
- output, is_success = self.execute_one_python_code(code)
- elif lang == "wolfram":
- output, is_success = self.execute_one_wolfram_query(code)
- else:
- output = "Error: Unknown language."
- is_success = False
-
- reply += output + "\n"
- if not is_success:
- all_success = False
- self._valid_q_count -= 1 # count invalid queries
-
- reply = reply.strip()
-
- if self.last_reply == reply:
- return True, reply + "\nYour query or result is same from the last, please try a new approach."
- self.last_reply = reply
-
- if not all_success:
- self._accum_invalid_q_per_step += 1
- if self._accum_invalid_q_per_step > self._max_invalid_q_per_step:
- self._accum_invalid_q_per_step = 0
- reply = "Please revisit the problem statement and your reasoning. If you think this step is correct, solve it yourself and continue the next step. Otherwise, correct this step."
-
- return True, reply
-
-
- # Modified based on langchain. Langchain is licensed under MIT License:
- # The MIT License
-
- # Copyright (c) Harrison Chase
-
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
-
- # The above copyright notice and this permission notice shall be included in
- # all copies or substantial portions of the Software.
-
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- # THE SOFTWARE.
-
-
- def get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None) -> str:
- """Get a value from a dictionary or an environment variable."""
- if key in data and data[key]:
- return data[key]
- elif env_key in os.environ and os.environ[env_key]:
- return os.environ[env_key]
- elif default is not None:
- return default
- else:
- raise ValueError(
- f"Did not find {key}, please add an environment variable"
- f" `{env_key}` which contains it, or pass"
- f" `{key}` as a named parameter."
- )
-
-
- class WolframAlphaAPIWrapper(BaseModel):
- """Wrapper for Wolfram Alpha.
-
- Docs for using:
-
- 1. Go to wolfram alpha and sign up for a developer account
- 2. Create an app and get your APP ID
- 3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
- 4. pip install wolframalpha
-
- """
-
- wolfram_client: Any #: :meta private:
- wolfram_alpha_appid: Optional[str] = None
-
- class Config:
- """Configuration for this pydantic object."""
-
- if PYDANTIC_V1:
- extra = Extra.forbid
-
- @root_validator(skip_on_failure=True)
- def validate_environment(cls, values: Dict) -> Dict:
- """Validate that api key and python package exists in environment."""
- wolfram_alpha_appid = get_from_dict_or_env(values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID")
- values["wolfram_alpha_appid"] = wolfram_alpha_appid
-
- try:
- import wolframalpha
-
- except ImportError as e:
- raise ImportError("wolframalpha is not installed. Please install it with `pip install wolframalpha`") from e
- client = wolframalpha.Client(wolfram_alpha_appid)
- values["wolfram_client"] = client
-
- return values
-
- def run(self, query: str) -> Tuple[str, bool]:
- """Run query through WolframAlpha and parse result."""
- from urllib.error import HTTPError
-
- is_success = False # added
- res = None
- for _ in range(20):
- try:
- res = self.wolfram_client.query(query)
- break
- except HTTPError:
- sleep(1)
- except Exception:
- return (
- "Wolfram Alpha wasn't able to answer it. Please try a new query for wolfram or use python.",
- is_success,
- )
- if res is None:
- return (
- "Wolfram Alpha wasn't able to answer it (may due to web error), you can try again or use python.",
- is_success,
- )
-
- try:
- if not res["@success"]:
- return (
- "Your Wolfram query is invalid. Please try a new query for wolfram or use python.",
- is_success,
- )
- assumption = next(res.pods).text
- answer = ""
- for result in res["pod"]:
- if result["@title"] == "Solution":
- answer = result["subpod"]["plaintext"]
- if result["@title"] == "Results" or result["@title"] == "Solutions":
- for i, sub in enumerate(result["subpod"]):
- answer += f"ans {i}: " + sub["plaintext"] + "\n"
- break
- if answer == "":
- answer = next(res.results).text
-
- except Exception:
- return (
- "Wolfram Alpha wasn't able to answer it. Please try a new query for wolfram or use python.",
- is_success,
- )
-
- if answer is None or answer == "":
- # We don't want to return the assumption alone if answer is empty
- return "No good Wolfram Alpha Result was found", is_success
- is_success = True
- return f"Assumption: {assumption} \nAnswer: {answer}", is_success
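The hunk above removes the MathChat helper, MathUserProxyAgent, from the wheel. For orientation, here is a minimal sketch of how this class was typically driven in 0.4.x, based on the docstrings above; the `OAI_CONFIG_LIST` source, the agent names, and the sample problem are illustrative placeholders, not values taken from this diff.

```python
# Hypothetical 0.4.x-style MathChat setup (illustrative sketch, not from this diff).
from autogen import AssistantAgent, config_list_from_json
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent

# Placeholder LLM configuration source.
config_list = config_list_from_json("OAI_CONFIG_LIST")

assistant = AssistantAgent(
    name="assistant",
    llm_config={"config_list": config_list},
)

# human_input_mode="NEVER" keeps the loop fully automated, matching the class defaults above.
mathproxyagent = MathUserProxyAgent(
    name="mathproxyagent",
    human_input_mode="NEVER",
    code_execution_config={"use_docker": False},
)

# message_generator prepends the selected PROMPTS template to the problem statement;
# extra keyword arguments are forwarded to the generator as its `context` dict.
mathproxyagent.initiate_chat(
    assistant,
    message=MathUserProxyAgent.message_generator,
    problem="Find all x that satisfy x^2 - 4x + 3 = 0.",
    prompt_type="python",
)
```

Termination in this pattern relies on `_is_termination_msg_mathchat` detecting a final `\boxed{}` answer with no remaining python or wolfram code blocks, as defined at the top of the removed file.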
autogen/agentchat/contrib/multimodal_conversable_agent.py
@@ -1,128 +0,0 @@
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
- #
- # SPDX-License-Identifier: Apache-2.0
- #
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
- # SPDX-License-Identifier: MIT
- import copy
- from typing import Dict, List, Optional, Tuple, Union
-
- from autogen import OpenAIWrapper
- from autogen.agentchat import Agent, ConversableAgent
- from autogen.agentchat.contrib.img_utils import (
- gpt4v_formatter,
- message_formatter_pil_to_b64,
- )
- from autogen.code_utils import content_str
-
- from ..._pydantic import model_dump
-
- DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant."""
- DEFAULT_MODEL = "gpt-4-vision-preview"
-
-
- class MultimodalConversableAgent(ConversableAgent):
- DEFAULT_CONFIG = {
- "model": DEFAULT_MODEL,
- }
-
- def __init__(
- self,
- name: str,
- system_message: Optional[Union[str, List]] = DEFAULT_LMM_SYS_MSG,
- is_termination_msg: str = None,
- *args,
- **kwargs,
- ):
- """
- Args:
- name (str): agent name.
- system_message (str): system message for the OpenAIWrapper inference.
- Please override this attribute if you want to reprogram the agent.
- **kwargs (dict): Please refer to other kwargs in
- [ConversableAgent](../conversable_agent#__init__).
- """
- super().__init__(
- name,
- system_message,
- is_termination_msg=is_termination_msg,
- *args,
- **kwargs,
- )
- # call the setter to handle special format.
- self.update_system_message(system_message)
- self._is_termination_msg = (
- is_termination_msg
- if is_termination_msg is not None
- else (lambda x: content_str(x.get("content")) == "TERMINATE")
- )
-
- # Override the `generate_oai_reply`
- self.replace_reply_func(ConversableAgent.generate_oai_reply, MultimodalConversableAgent.generate_oai_reply)
- self.replace_reply_func(
- ConversableAgent.a_generate_oai_reply,
- MultimodalConversableAgent.a_generate_oai_reply,
- )
-
- def update_system_message(self, system_message: Union[Dict, List, str]):
- """Update the system message.
-
- Args:
- system_message (str): system message for the OpenAIWrapper inference.
- """
- self._oai_system_message[0]["content"] = self._message_to_dict(system_message)["content"]
- self._oai_system_message[0]["role"] = "system"
-
- @staticmethod
- def _message_to_dict(message: Union[Dict, List, str]) -> Dict:
- """Convert a message to a dictionary. This implementation
- handles the GPT-4V formatting for easier prompts.
-
- The message can be a string, a dictionary, or a list of dictionaries:
- - If it's a string, it will be cast into a list and placed in the 'content' field.
- - If it's a list, it will be directly placed in the 'content' field.
- - If it's a dictionary, it is already in message dict format. The 'content' field of this dictionary
- will be processed using the gpt4v_formatter.
- """
- if isinstance(message, str):
- return {"content": gpt4v_formatter(message, img_format="pil")}
- if isinstance(message, list):
- return {"content": message}
- if isinstance(message, dict):
- assert "content" in message, "The message dict must have a `content` field"
- if isinstance(message["content"], str):
- message = copy.deepcopy(message)
- message["content"] = gpt4v_formatter(message["content"], img_format="pil")
- try:
- content_str(message["content"])
- except (TypeError, ValueError) as e:
- print("The `content` field should be compatible with the content_str function!")
- raise e
- return message
- raise ValueError(f"Unsupported message type: {type(message)}")
-
- def generate_oai_reply(
- self,
- messages: Optional[List[Dict]] = None,
- sender: Optional[Agent] = None,
- config: Optional[OpenAIWrapper] = None,
- ) -> Tuple[bool, Union[str, Dict, None]]:
- """Generate a reply using autogen.oai."""
- client = self.client if config is None else config
- if client is None:
- return False, None
- if messages is None:
- messages = self._oai_messages[sender]
-
- messages_with_b64_img = message_formatter_pil_to_b64(self._oai_system_message + messages)
-
- # TODO: #1143 handle token limit exceeded error
- response = client.create(
- context=messages[-1].pop("context", None), messages=messages_with_b64_img, agent=self.name
- )
-
- # TODO: line 301, line 271 is converting messages to dict. Can be removed after ChatCompletionMessage_to_dict is merged.
- extracted_response = client.extract_text_or_completion_object(response)[0]
- if not isinstance(extracted_response, str):
- extracted_response = model_dump(extracted_response)
- return True, extracted_response
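The hunk above removes MultimodalConversableAgent. As a rough sketch of the 0.4.x usage pattern this drops, based on the `_message_to_dict` handling shown above; the model name, config source, and image URL below are illustrative placeholders rather than values from this diff.

```python
# Hypothetical 0.4.x-style multimodal chat setup (illustrative sketch, not from this diff).
from autogen import UserProxyAgent, config_list_from_json
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent

# Placeholder: a config list pointing at a vision-capable model.
config_list_4v = config_list_from_json(
    "OAI_CONFIG_LIST",
    filter_dict={"model": ["gpt-4-vision-preview"]},
)

image_agent = MultimodalConversableAgent(
    name="image-explainer",
    llm_config={"config_list": config_list_4v, "max_tokens": 300},
)

user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=0,
    code_execution_config=False,
)

# The <img ...> tag is expanded by gpt4v_formatter (via _message_to_dict above)
# into an image content part; the URL here is a placeholder.
user_proxy.initiate_chat(
    image_agent,
    message="What is shown in this image? <img https://example.com/sample.png>",
)
```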