ag2-0.3.2b2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 has been flagged as possibly problematic.

Files changed (112)
  1. ag2-0.3.2b2.dist-info/LICENSE +201 -0
  2. ag2-0.3.2b2.dist-info/METADATA +490 -0
  3. ag2-0.3.2b2.dist-info/NOTICE.md +19 -0
  4. ag2-0.3.2b2.dist-info/RECORD +112 -0
  5. ag2-0.3.2b2.dist-info/WHEEL +5 -0
  6. ag2-0.3.2b2.dist-info/top_level.txt +1 -0
  7. autogen/__init__.py +17 -0
  8. autogen/_pydantic.py +116 -0
  9. autogen/agentchat/__init__.py +26 -0
  10. autogen/agentchat/agent.py +142 -0
  11. autogen/agentchat/assistant_agent.py +85 -0
  12. autogen/agentchat/chat.py +306 -0
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +785 -0
  15. autogen/agentchat/contrib/agent_optimizer.py +450 -0
  16. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  17. autogen/agentchat/contrib/capabilities/agent_capability.py +21 -0
  18. autogen/agentchat/contrib/capabilities/generate_images.py +297 -0
  19. autogen/agentchat/contrib/capabilities/teachability.py +406 -0
  20. autogen/agentchat/contrib/capabilities/text_compressors.py +72 -0
  21. autogen/agentchat/contrib/capabilities/transform_messages.py +92 -0
  22. autogen/agentchat/contrib/capabilities/transforms.py +565 -0
  23. autogen/agentchat/contrib/capabilities/transforms_util.py +120 -0
  24. autogen/agentchat/contrib/capabilities/vision_capability.py +217 -0
  25. autogen/agentchat/contrib/gpt_assistant_agent.py +545 -0
  26. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  27. autogen/agentchat/contrib/graph_rag/document.py +24 -0
  28. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +76 -0
  29. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +50 -0
  30. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +56 -0
  31. autogen/agentchat/contrib/img_utils.py +390 -0
  32. autogen/agentchat/contrib/llamaindex_conversable_agent.py +114 -0
  33. autogen/agentchat/contrib/llava_agent.py +176 -0
  34. autogen/agentchat/contrib/math_user_proxy_agent.py +471 -0
  35. autogen/agentchat/contrib/multimodal_conversable_agent.py +128 -0
  36. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
  37. autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
  38. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +701 -0
  39. autogen/agentchat/contrib/society_of_mind_agent.py +203 -0
  40. autogen/agentchat/contrib/text_analyzer_agent.py +76 -0
  41. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  42. autogen/agentchat/contrib/vectordb/base.py +243 -0
  43. autogen/agentchat/contrib/vectordb/chromadb.py +326 -0
  44. autogen/agentchat/contrib/vectordb/mongodb.py +559 -0
  45. autogen/agentchat/contrib/vectordb/pgvectordb.py +958 -0
  46. autogen/agentchat/contrib/vectordb/qdrant.py +334 -0
  47. autogen/agentchat/contrib/vectordb/utils.py +126 -0
  48. autogen/agentchat/contrib/web_surfer.py +305 -0
  49. autogen/agentchat/conversable_agent.py +2904 -0
  50. autogen/agentchat/groupchat.py +1666 -0
  51. autogen/agentchat/user_proxy_agent.py +109 -0
  52. autogen/agentchat/utils.py +207 -0
  53. autogen/browser_utils.py +291 -0
  54. autogen/cache/__init__.py +10 -0
  55. autogen/cache/abstract_cache_base.py +78 -0
  56. autogen/cache/cache.py +182 -0
  57. autogen/cache/cache_factory.py +85 -0
  58. autogen/cache/cosmos_db_cache.py +150 -0
  59. autogen/cache/disk_cache.py +109 -0
  60. autogen/cache/in_memory_cache.py +61 -0
  61. autogen/cache/redis_cache.py +128 -0
  62. autogen/code_utils.py +745 -0
  63. autogen/coding/__init__.py +22 -0
  64. autogen/coding/base.py +113 -0
  65. autogen/coding/docker_commandline_code_executor.py +262 -0
  66. autogen/coding/factory.py +45 -0
  67. autogen/coding/func_with_reqs.py +203 -0
  68. autogen/coding/jupyter/__init__.py +22 -0
  69. autogen/coding/jupyter/base.py +32 -0
  70. autogen/coding/jupyter/docker_jupyter_server.py +164 -0
  71. autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
  72. autogen/coding/jupyter/jupyter_client.py +224 -0
  73. autogen/coding/jupyter/jupyter_code_executor.py +161 -0
  74. autogen/coding/jupyter/local_jupyter_server.py +168 -0
  75. autogen/coding/local_commandline_code_executor.py +410 -0
  76. autogen/coding/markdown_code_extractor.py +44 -0
  77. autogen/coding/utils.py +57 -0
  78. autogen/exception_utils.py +46 -0
  79. autogen/extensions/__init__.py +0 -0
  80. autogen/formatting_utils.py +76 -0
  81. autogen/function_utils.py +362 -0
  82. autogen/graph_utils.py +148 -0
  83. autogen/io/__init__.py +15 -0
  84. autogen/io/base.py +105 -0
  85. autogen/io/console.py +43 -0
  86. autogen/io/websockets.py +213 -0
  87. autogen/logger/__init__.py +11 -0
  88. autogen/logger/base_logger.py +140 -0
  89. autogen/logger/file_logger.py +287 -0
  90. autogen/logger/logger_factory.py +29 -0
  91. autogen/logger/logger_utils.py +42 -0
  92. autogen/logger/sqlite_logger.py +459 -0
  93. autogen/math_utils.py +356 -0
  94. autogen/oai/__init__.py +33 -0
  95. autogen/oai/anthropic.py +428 -0
  96. autogen/oai/bedrock.py +600 -0
  97. autogen/oai/cerebras.py +264 -0
  98. autogen/oai/client.py +1148 -0
  99. autogen/oai/client_utils.py +167 -0
  100. autogen/oai/cohere.py +453 -0
  101. autogen/oai/completion.py +1216 -0
  102. autogen/oai/gemini.py +469 -0
  103. autogen/oai/groq.py +281 -0
  104. autogen/oai/mistral.py +279 -0
  105. autogen/oai/ollama.py +576 -0
  106. autogen/oai/openai_utils.py +810 -0
  107. autogen/oai/together.py +343 -0
  108. autogen/retrieve_utils.py +487 -0
  109. autogen/runtime_logging.py +163 -0
  110. autogen/token_count_utils.py +257 -0
  111. autogen/types.py +20 -0
  112. autogen/version.py +7 -0
autogen/code_utils.py ADDED
@@ -0,0 +1,745 @@
# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
import logging
import os
import pathlib
import re
import string
import subprocess
import sys
import time
import venv
from concurrent.futures import ThreadPoolExecutor, TimeoutError
from hashlib import md5
from types import SimpleNamespace
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import docker

from autogen import oai

from .types import UserMessageImageContentPart, UserMessageTextContentPart

SENTINEL = object()
DEFAULT_MODEL = "gpt-4"
FAST_MODEL = "gpt-3.5-turbo"
# Regular expression for finding a code block
# ```[ \t]*(\w+)?[ \t]*\r?\n(.*?)[ \t]*\r?\n``` Matches multi-line code blocks.
# The [ \t]* matches the potential spaces before language name.
# The (\w+)? matches the language, where the ? indicates it is optional.
# The [ \t]* matches the potential spaces (not newlines) after language name.
# The \r?\n makes sure there is a linebreak after ```.
# The (.*?) matches the code itself (non-greedy).
# The \r?\n makes sure there is a linebreak before ```.
# The [ \t]* matches the potential spaces before closing ``` (the spec allows indentation).
CODE_BLOCK_PATTERN = r"```[ \t]*(\w+)?[ \t]*\r?\n(.*?)\r?\n[ \t]*```"
WORKING_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extensions")
UNKNOWN = "unknown"
TIMEOUT_MSG = "Timeout"
DEFAULT_TIMEOUT = 600
WIN32 = sys.platform == "win32"
PATH_SEPARATOR = WIN32 and "\\" or "/"
PYTHON_VARIANTS = ["python", "Python", "py"]

logger = logging.getLogger(__name__)


def content_str(content: Union[str, List[Union[UserMessageTextContentPart, UserMessageImageContentPart]], None]) -> str:
    """Converts the `content` field of an OpenAI message into a string format.

    This function processes content that may be a string, a list of mixed text and image URLs, or None,
    and converts it into a string. Text is directly appended to the result string, while image URLs are
    represented by a placeholder image token. If the content is None, an empty string is returned.

    Args:
        - content (Union[str, List, None]): The content to be processed. Can be a string, a list of dictionaries
            representing text and image URLs, or None.

    Returns:
        str: A string representation of the input content. Image URLs are replaced with an image token.

    Note:
        - The function expects each dictionary in the list to have a "type" key that is either "text" or "image_url".
          For "text" type, the "text" key's value is appended to the result. For "image_url", an image token is appended.
        - This function is useful for handling content that may include both text and image references, especially
          in contexts where images need to be represented as placeholders.
    """
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if not isinstance(content, list):
        raise TypeError(f"content must be None, str, or list, but got {type(content)}")

    rst = ""
    for item in content:
        if not isinstance(item, dict):
            raise TypeError("Wrong content format: every element should be dict if the content is a list.")
        assert "type" in item, "Wrong content format. Missing 'type' key in content's dict."
        if item["type"] == "text":
            rst += item["text"]
        elif item["type"] == "image_url":
            rst += "<image>"
        else:
            raise ValueError(f"Wrong content format: unknown type {item['type']} within the content")
    return rst


def infer_lang(code: str) -> str:
    """infer the language for the code.
    TODO: make it robust.
    """
    if code.startswith("python ") or code.startswith("pip") or code.startswith("python3 "):
        return "sh"

    # check if code is a valid python code
    try:
        compile(code, "test", "exec")
        return "python"
    except SyntaxError:
        # not a valid python code
        return UNKNOWN


# TODO: In the future move, to better support https://spec.commonmark.org/0.30/#fenced-code-blocks
# perhaps by using a full Markdown parser.
def extract_code(
    text: Union[str, List], pattern: str = CODE_BLOCK_PATTERN, detect_single_line_code: bool = False
) -> List[Tuple[str, str]]:
    """Extract code from a text.

    Args:
        text (str or List): The content to extract code from. The content can be
            a string or a list, as returned by standard GPT or multimodal GPT.
        pattern (str, optional): The regular expression pattern for finding the
            code block. Defaults to CODE_BLOCK_PATTERN.
        detect_single_line_code (bool, optional): Enable the new feature for
            extracting single line code. Defaults to False.

    Returns:
        list: A list of tuples, each containing the language and the code.
            If there is no code block in the input text, the language would be "unknown".
            If there is code block but the language is not specified, the language would be "".
    """
    text = content_str(text)
    if not detect_single_line_code:
        match = re.findall(pattern, text, flags=re.DOTALL)
        return match if match else [(UNKNOWN, text)]

    # Extract both multi-line and single-line code block, separated by the | operator
    # `([^`]+)`: Matches inline code.
    code_pattern = re.compile(CODE_BLOCK_PATTERN + r"|`([^`]+)`")
    code_blocks = code_pattern.findall(text)

    # Extract the individual code blocks and languages from the matched groups
    extracted = []
    for lang, group1, group2 in code_blocks:
        if group1:
            extracted.append((lang.strip(), group1.strip()))
        elif group2:
            extracted.append(("", group2.strip()))

    return extracted


def generate_code(pattern: str = CODE_BLOCK_PATTERN, **config) -> Tuple[str, float]:
    """(openai<1) Generate code.

    Args:
        pattern (Optional, str): The regular expression pattern for finding the code block.
            The default pattern is for finding a code block in a markdown file.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The generated code.
        float: The cost of the generation.
    """
    response = oai.Completion.create(**config)
    return extract_code(oai.Completion.extract_text(response)[0], pattern), response["cost"]


_IMPROVE_FUNCTION_CONFIG = {
    "prompt": """Improve the function '{func_name}' to achieve the objective '{objective}'.
The current implementation of the function is as follows:
{file_string}""",
    "model": DEFAULT_MODEL,
    "request_timeout": 600,
}


def improve_function(file_name, func_name, objective, **config):
    """(openai<1) Improve the function to achieve the objective."""
    params = {**_IMPROVE_FUNCTION_CONFIG, **config}
    # read the entire file into a str
    with open(file_name, "r") as f:
        file_string = f.read()
    response = oai.Completion.create(
        {"func_name": func_name, "objective": objective, "file_string": file_string}, **params
    )
    return oai.Completion.extract_text(response)[0], response["cost"]


_IMPROVE_CODE_CONFIG = {
    "prompt": """Analyze the code in the following files and return a list of suggestions for improvement{followup}, to achieve the objective of '{objective}'.
{code}
""",
    "model": DEFAULT_MODEL,
    "request_timeout": 900,
}


def improve_code(files, objective, suggest_only=True, **config):
    """(openai<1) Improve the code to achieve a given objective.

    Args:
        files (list): A list of file names containing the source code.
        objective (str): The objective to achieve.
        suggest_only (bool): Whether to return only the suggestions or the improved code.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The improved code if suggest_only=False; a list of suggestions if suggest_only=True (default).
        float: The cost of the generation.
    """
    code = ""
    for file_name in files:
        # read the entire file into a string
        with open(file_name, "r") as f:
            file_string = f.read()
        code += f"""{file_name}:
{file_string}

"""
    params = {**_IMPROVE_CODE_CONFIG, **config}
    followup = "" if suggest_only else " followed by the improved code"
    response = oai.Completion.create({"objective": objective, "code": code, "followup": followup}, **params)
    return oai.Completion.extract_text(response)[0], response["cost"]


def timeout_handler(signum, frame):
    raise TimeoutError("Timed out!")


def get_powershell_command():
    try:
        result = subprocess.run(["powershell", "$PSVersionTable.PSVersion.Major"], capture_output=True, text=True)
        if result.returncode == 0:
            return "powershell"
    except (FileNotFoundError, NotADirectoryError):
        # This means that 'powershell' command is not found so now we try looking for 'pwsh'
        try:
            result = subprocess.run(
                ["pwsh", "-Command", "$PSVersionTable.PSVersion.Major"], capture_output=True, text=True
            )
            if result.returncode == 0:
                return "pwsh"
        except FileExistsError as e:
            raise FileNotFoundError(
                "Neither powershell.exe nor pwsh.exe is present in the system. "
                "Please install PowerShell and try again. "
            ) from e
        except NotADirectoryError as e:
            raise NotADirectoryError(
                "PowerShell is either not installed or its path is not given "
                "properly in the environment variable PATH. Please check the "
                "path and try again. "
            ) from e
    except PermissionError as e:
        raise PermissionError("No permission to run powershell.") from e


def _cmd(lang: str) -> str:
    if lang in PYTHON_VARIANTS:
        return "python"
    if lang.startswith("python") or lang in ["bash", "sh"]:
        return lang
    if lang in ["shell"]:
        return "sh"
    if lang == "javascript":
        return "node"
    if lang in ["ps1", "pwsh", "powershell"]:
        powershell_command = get_powershell_command()
        return powershell_command

    raise NotImplementedError(f"{lang} not recognized in code execution")


def is_docker_running() -> bool:
    """Check if docker is running.

    Returns:
        bool: True if docker is running; False otherwise.
    """
    try:
        client = docker.from_env()
        client.ping()
        return True
    except docker.errors.DockerException:
        return False


def in_docker_container() -> bool:
    """Check if the code is running in a docker container.

    Returns:
        bool: True if the code is running in a docker container; False otherwise.
    """
    return os.path.exists("/.dockerenv")


def decide_use_docker(use_docker: Optional[bool]) -> Optional[bool]:
    if use_docker is None:
        env_var_use_docker = os.environ.get("AUTOGEN_USE_DOCKER", "True")

        truthy_values = {"1", "true", "yes", "t"}
        falsy_values = {"0", "false", "no", "f"}

        # Convert the value to lowercase for case-insensitive comparison
        env_var_use_docker_lower = env_var_use_docker.lower()

        # Determine the boolean value based on the environment variable
        if env_var_use_docker_lower in truthy_values:
            use_docker = True
        elif env_var_use_docker_lower in falsy_values:
            use_docker = False
        elif env_var_use_docker_lower == "none":  # Special case for 'None' as a string
            use_docker = None
        else:
            # Raise an error for any unrecognized value
            raise ValueError(
                f'Invalid value for AUTOGEN_USE_DOCKER: {env_var_use_docker}. Please set AUTOGEN_USE_DOCKER to "1/True/yes", "0/False/no", or "None".'
            )
    return use_docker


def check_can_use_docker_or_throw(use_docker) -> None:
    if use_docker is not None:
        inside_docker = in_docker_container()
        docker_installed_and_running = is_docker_running()
        if use_docker and not inside_docker and not docker_installed_and_running:
            raise RuntimeError(
                "Code execution is set to be run in docker (default behaviour) but docker is not running.\n"
                "The options available are:\n"
                "- Make sure docker is running (advised approach for code execution)\n"
                '- Set "use_docker": False in code_execution_config\n'
                '- Set AUTOGEN_USE_DOCKER to "0/False/no" in your environment variables'
            )


def _sanitize_filename_for_docker_tag(filename: str) -> str:
    """Convert a filename to a valid docker tag.
    See https://docs.docker.com/engine/reference/commandline/tag/ for valid tag
    format.

    Args:
        filename (str): The filename to be converted.

    Returns:
        str: The sanitized Docker tag.
    """
    # Replace any character not allowed with an underscore
    allowed_chars = set(string.ascii_letters + string.digits + "_.-")
    sanitized = "".join(char if char in allowed_chars else "_" for char in filename)

    # Ensure it does not start with a period or a dash
    if sanitized.startswith(".") or sanitized.startswith("-"):
        sanitized = "_" + sanitized[1:]

    # Truncate if longer than 128 characters
    return sanitized[:128]


def execute_code(
    code: Optional[str] = None,
    timeout: Optional[int] = None,
    filename: Optional[str] = None,
    work_dir: Optional[str] = None,
    use_docker: Union[List[str], str, bool] = SENTINEL,
    lang: Optional[str] = "python",
) -> Tuple[int, str, Optional[str]]:
    """Execute code in a docker container.
    This function is not tested on MacOS.

    Args:
        code (Optional, str): The code to execute.
            If None, the code from the file specified by filename will be executed.
            Either code or filename must be provided.
        timeout (Optional, int): The maximum execution time in seconds.
            If None, a default timeout will be used. The default timeout is 600 seconds. On Windows, the timeout is not enforced when use_docker=False.
        filename (Optional, str): The file name to save the code or where the code is stored when `code` is None.
            If None, a file with a randomly generated name will be created.
            The randomly generated file will be deleted after execution.
            The file name must be a relative path. Relative paths are relative to the working directory.
        work_dir (Optional, str): The working directory for the code execution.
            If None, a default working directory will be used.
            The default working directory is the "extensions" directory under
            "path_to_autogen".
        use_docker (list, str or bool): The docker image to use for code execution.
            Default is True, which means the code will be executed in a docker container. A default list of images will be used.
            If a list or a str of image name(s) is provided, the code will be executed in a docker container
            with the first image successfully pulled.
            If False, the code will be executed in the current environment.
            Expected behaviour:
                - If `use_docker` is not set (i.e. left default to True) or is explicitly set to True and the docker package is available, the code will run in a Docker container.
                - If `use_docker` is not set (i.e. left default to True) or is explicitly set to True but the Docker package is missing or docker isn't running, an error will be raised.
                - If `use_docker` is explicitly set to False, the code will run natively.
            If the code is executed in the current environment,
            the code must be trusted.
        lang (Optional, str): The language of the code. Default is "python".

    Returns:
        int: 0 if the code executes successfully.
        str: The error message if the code fails to execute; the stdout otherwise.
        image: The docker image name after container run when docker is used.
    """
    if all((code is None, filename is None)):
        error_msg = f"Either {code=} or {filename=} must be provided."
        logger.error(error_msg)
        raise AssertionError(error_msg)

    running_inside_docker = in_docker_container()
    docker_running = is_docker_running()

    # SENTINEL is used to indicate that the user did not explicitly set the argument
    if use_docker is SENTINEL:
        use_docker = decide_use_docker(use_docker=None)
    check_can_use_docker_or_throw(use_docker)

    timeout = timeout or DEFAULT_TIMEOUT
    original_filename = filename
    if WIN32 and lang in ["sh", "shell"] and (not use_docker):
        lang = "ps1"
    if filename is None:
        code_hash = md5(code.encode()).hexdigest()
        # create a file with a automatically generated name
        filename = f"tmp_code_{code_hash}.{'py' if lang.startswith('python') else lang}"
    if work_dir is None:
        work_dir = WORKING_DIR

    filepath = os.path.join(work_dir, filename)
    file_dir = os.path.dirname(filepath)
    os.makedirs(file_dir, exist_ok=True)

    if code is not None:
        with open(filepath, "w", encoding="utf-8") as fout:
            fout.write(code)

    if not use_docker or running_inside_docker:
        # already running in a docker container
        cmd = [
            sys.executable if lang.startswith("python") else _cmd(lang),
            f".\\{filename}" if WIN32 else filename,
        ]
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(
                subprocess.run,
                cmd,
                cwd=work_dir,
                capture_output=True,
                text=True,
            )
            try:
                result = future.result(timeout=timeout)
            except TimeoutError:
                if original_filename is None:
                    os.remove(filepath)
                return 1, TIMEOUT_MSG, None
        if original_filename is None:
            os.remove(filepath)
        if result.returncode:
            logs = result.stderr
            if original_filename is None:
                abs_path = str(pathlib.Path(filepath).absolute())
                logs = logs.replace(str(abs_path), "").replace(filename, "")
            else:
                abs_path = str(pathlib.Path(work_dir).absolute()) + PATH_SEPARATOR
                logs = logs.replace(str(abs_path), "")
        else:
            logs = result.stdout
        return result.returncode, logs, None

    # create a docker client
    if use_docker and not docker_running:
        raise RuntimeError(
            "Docker package is missing or docker is not running. Please make sure docker is running or set use_docker=False."
        )

    client = docker.from_env()

    image_list = (
        ["python:3-slim", "python:3", "python:3-windowsservercore"]
        if use_docker is True
        else [use_docker] if isinstance(use_docker, str) else use_docker
    )
    for image in image_list:
        # check if the image exists
        try:
            client.images.get(image)
            break
        except docker.errors.ImageNotFound:
            # pull the image
            print("Pulling image", image)
            try:
                client.images.pull(image)
                break
            except docker.errors.DockerException:
                print("Failed to pull image", image)
    # get a randomized str based on current time to wrap the exit code
    exit_code_str = f"exitcode{time.time()}"
    abs_path = pathlib.Path(work_dir).absolute()
    cmd = [
        "sh",
        "-c",
        f'{_cmd(lang)} "{filename}"; exit_code=$?; echo -n {exit_code_str}; echo -n $exit_code; echo {exit_code_str}',
    ]
    # create a docker container
    container = client.containers.run(
        image,
        command=cmd,
        working_dir="/workspace",
        detach=True,
        # get absolute path to the working directory
        volumes={abs_path: {"bind": "/workspace", "mode": "rw"}},
    )
    start_time = time.time()
    while container.status != "exited" and time.time() - start_time < timeout:
        # Reload the container object
        container.reload()
    if container.status != "exited":
        container.stop()
        container.remove()
        if original_filename is None:
            os.remove(filepath)
        return 1, TIMEOUT_MSG, image
    # get the container logs
    logs = container.logs().decode("utf-8").rstrip()
    # commit the image
    tag = _sanitize_filename_for_docker_tag(filename)
    container.commit(repository="python", tag=tag)
    # remove the container
    container.remove()
    # check if the code executed successfully
    exit_code = container.attrs["State"]["ExitCode"]
    if exit_code == 0:
        # extract the exit code from the logs
        pattern = re.compile(f"{exit_code_str}(\\d+){exit_code_str}")
        match = pattern.search(logs)
        exit_code = 1 if match is None else int(match.group(1))
        # remove the exit code from the logs
        logs = logs if match is None else pattern.sub("", logs)

    if original_filename is None:
        os.remove(filepath)
    if exit_code:
        logs = logs.replace(f"/workspace/{filename if original_filename is None else ''}", "")
    # return the exit code, logs and image
    return exit_code, logs, f"python:{tag}"


_GENERATE_ASSERTIONS_CONFIG = {
    "prompt": """Given the signature and docstring, write the exactly same number of assertion(s) for the provided example(s) in the docstring, without assertion messages.

func signature:
{definition}
assertions:""",
    "model": FAST_MODEL,
    "max_tokens": 256,
    "stop": "\n\n",
}


def generate_assertions(definition: str, **config) -> Tuple[str, float]:
    """(openai<1) Generate assertions for a function.

    Args:
        definition (str): The function definition, including the signature and docstr.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The generated assertions.
        float: The cost of the generation.
    """
    params = {**_GENERATE_ASSERTIONS_CONFIG, **config}
    response = oai.Completion.create(
        {"definition": definition},
        **params,
    )
    assertions = oai.Completion.extract_text(response)[0]
    return assertions, response["cost"]


def _remove_check(response):
    """Remove the check function from the response."""
    # find the position of the check function
    pos = response.find("def check(")
    if pos == -1:
        return response
    return response[:pos]


def eval_function_completions(
    responses: List[str],
    definition: str,
    test: Optional[str] = None,
    entry_point: Optional[str] = None,
    assertions: Optional[Union[str, Callable[[str], Tuple[str, float]]]] = None,
    timeout: Optional[float] = 3,
    use_docker: Optional[bool] = True,
) -> Dict:
    """(openai<1) Select a response from a list of responses for the function completion task (using generated assertions), and/or evaluate if the task is successful using a gold test.

    Args:
        responses (list): The list of responses.
        definition (str): The input definition.
        test (Optional, str): The test code.
        entry_point (Optional, str): The name of the function.
        assertions (Optional, str or Callable): The assertion code which serves as a filter of the responses, or an assertion generator.
            When provided, only the responses that pass the assertions will be considered for the actual test (if provided).
        timeout (Optional, float): The timeout for executing the code.

    Returns:
        dict: The success metrics.
    """
    n = len(responses)
    if assertions is None:
        # no assertion filter
        success_list = []
        for i in range(n):
            response = _remove_check(responses[i])
            code = (
                f"{response}\n{test}\ncheck({entry_point})"
                if response.startswith("def")
                else f"{definition}{response}\n{test}\ncheck({entry_point})"
            )
            success = execute_code(code, timeout=timeout, use_docker=use_docker)[0] == 0
            success_list.append(success)
        return {
            "expected_success": 1 - pow(1 - sum(success_list) / n, n),
            "success": any(s for s in success_list),
        }
    if callable(assertions) and n > 1:
        # assertion generator
        assertions, gen_cost = assertions(definition)
    else:
        assertions, gen_cost = None, 0
    if n > 1 or test is None:
        for i in range(n):
            response = responses[i] = _remove_check(responses[i])
            code = (
                f"{response}\n{assertions}" if response.startswith("def") else f"{definition}{response}\n{assertions}"
            )
            succeed_assertions = execute_code(code, timeout=timeout, use_docker=use_docker)[0] == 0
            if succeed_assertions:
                break
    else:
        # just test, no need to check assertions
        succeed_assertions = False
        i, response = 0, responses[0]
    if test is None:
        # no test code
        return {
            "index_selected": i,
            "succeed_assertions": succeed_assertions,
            "gen_cost": gen_cost,
            "assertions": assertions,
        }
    code_test = (
        f"{response}\n{test}\ncheck({entry_point})"
        if response.startswith("def")
        else f"{definition}{response}\n{test}\ncheck({entry_point})"
    )
    success = execute_code(code_test, timeout=timeout, use_docker=use_docker)[0] == 0
    return {
        "index_selected": i,
        "succeed_assertions": succeed_assertions,
        "success": success,
        "gen_cost": gen_cost,
        "assertions": assertions,
    }


_FUNC_COMPLETION_PROMPT = "# Python 3{definition}"
_FUNC_COMPLETION_STOP = ["\nclass", "\ndef", "\nif", "\nprint"]
_IMPLEMENT_CONFIGS = [
    {"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "cache_seed": 0},
    {"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 7, "cache_seed": 0},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "cache_seed": 1},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 2, "cache_seed": 2},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 1, "cache_seed": 2},
]


class PassAssertionFilter:
    def __init__(self, assertions):
        self._assertions = assertions
        self.cost = 0
        self.metrics = self.responses = None

    def pass_assertions(self, context, response, **_):
        """(openai<1) Check if the response passes the assertions."""
        responses = oai.Completion.extract_text(response)
        metrics = eval_function_completions(responses, context["definition"], assertions=self._assertions)
        self._assertions = metrics["assertions"]
        self.cost += metrics["gen_cost"]
        self.metrics = metrics
        self.responses = responses
        return metrics["succeed_assertions"]


def implement(
    definition: str,
    configs: Optional[List[Dict]] = None,
    assertions: Optional[Union[str, Callable[[str], Tuple[str, float]]]] = generate_assertions,
) -> Tuple[str, float]:
    """(openai<1) Implement a function from a definition.

    Args:
        definition (str): The function definition, including the signature and docstr.
        configs (list): The list of configurations for completion.
        assertions (Optional, str or Callable): The assertion code which serves as a filter of the responses, or an assertion generator.

    Returns:
        str: The implementation.
        float: The cost of the implementation.
        int: The index of the configuration which generates the implementation.
    """
    cost = 0
    configs = configs or _IMPLEMENT_CONFIGS
    if len(configs) > 1 and callable(assertions):
        assertions, cost = assertions(definition)
    assertion_filter = PassAssertionFilter(assertions)
    response = oai.Completion.create(
        {"definition": definition}, config_list=configs, filter_func=assertion_filter.pass_assertions
    )
    cost += assertion_filter.cost + response["cost"]
    return assertion_filter.responses[assertion_filter.metrics["index_selected"]], cost, response["config_id"]

    # for i, config in enumerate(configs):
    #     response = oai.Completion.create({"definition": definition}, **config)
    #     cost += oai.Completion.cost(response)
    #     responses = oai.Completion.extract_text(response)
    #     metrics = eval_function_completions(responses, definition, assertions=assertions)
    #     assertions = metrics["assertions"]
    #     cost += metrics["gen_cost"]
    #     if metrics["succeed_assertions"] or i == len(configs) - 1:
    #         return responses[metrics["index_selected"]], cost, i


def create_virtual_env(dir_path: str, **env_args) -> SimpleNamespace:
    """Creates a python virtual environment and returns the context.

    Args:
        dir_path (str): Directory path where the env will be created.
        **env_args: Any extra args to pass to the `EnvBuilder`

    Returns:
        SimpleNamespace: the virtual env context object."""
    if not env_args:
        env_args = {"with_pip": True}
    env_builder = venv.EnvBuilder(**env_args)
    env_builder.create(dir_path)
    return env_builder.ensure_directories(dir_path)
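
Usage note (not part of the packaged file): a minimal sketch of how the extraction and execution helpers above fit together, assuming this wheel is installed and that you accept running the snippet natively. The reply string, the work_dir value, and the expected output comments are illustrative only.

# Hypothetical usage sketch: exercising extract_code, infer_lang, and
# execute_code from the module shown above.
from autogen.code_utils import execute_code, extract_code, infer_lang

# A model reply containing one fenced code block (built with \n escapes to keep this compact).
reply = "Here is the script:\n```python\nprint(\"hello from ag2\")\n```"

blocks = extract_code(reply)          # -> [("python", 'print("hello from ag2")')]
lang, code = blocks[0]
assert infer_lang(code) == "python"   # compiles as Python, so not classified as "sh" or "unknown"

# Run natively in a scratch directory (trusted code only); leaving use_docker unset
# would instead go through the AUTOGEN_USE_DOCKER / Docker checks above.
exit_code, logs, image = execute_code(code, lang=lang, work_dir="coding", use_docker=False)
print(exit_code, logs, image)         # expected: 0, the captured stdout, and None (no Docker image used)

When Docker is used instead, execute_code commits the container and returns the committed image tag as the third element, per the implementation above.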