ag2 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of ag2 might be problematic.

Files changed (160)
  1. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/METADATA +5 -146
  2. ag2-0.5.0.dist-info/RECORD +6 -0
  3. ag2-0.5.0.dist-info/top_level.txt +1 -0
  4. ag2-0.4.1.dist-info/RECORD +0 -158
  5. ag2-0.4.1.dist-info/top_level.txt +0 -1
  6. autogen/__init__.py +0 -17
  7. autogen/_pydantic.py +0 -116
  8. autogen/agentchat/__init__.py +0 -42
  9. autogen/agentchat/agent.py +0 -142
  10. autogen/agentchat/assistant_agent.py +0 -85
  11. autogen/agentchat/chat.py +0 -306
  12. autogen/agentchat/contrib/__init__.py +0 -0
  13. autogen/agentchat/contrib/agent_builder.py +0 -788
  14. autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
  15. autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
  16. autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
  17. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
  18. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
  19. autogen/agentchat/contrib/agent_eval/task.py +0 -43
  20. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  21. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  22. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  23. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  24. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  25. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  26. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  27. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  28. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  29. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  30. autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
  31. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
  32. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
  33. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
  34. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
  35. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
  36. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
  37. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
  38. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
  39. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
  40. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
  41. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
  42. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
  43. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
  44. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
  45. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
  49. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
  50. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
  51. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
  52. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
  53. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
  54. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
  55. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
  56. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
  57. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
  58. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
  59. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
  60. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
  61. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
  62. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
  63. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
  64. autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
  65. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
  66. autogen/agentchat/contrib/captainagent.py +0 -490
  67. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  68. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  69. autogen/agentchat/contrib/graph_rag/document.py +0 -30
  70. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
  71. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
  72. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
  73. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
  74. autogen/agentchat/contrib/img_utils.py +0 -390
  75. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  76. autogen/agentchat/contrib/llava_agent.py +0 -176
  77. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  78. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  79. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  80. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  81. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
  82. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  83. autogen/agentchat/contrib/swarm_agent.py +0 -463
  84. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  85. autogen/agentchat/contrib/tool_retriever.py +0 -120
  86. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  87. autogen/agentchat/contrib/vectordb/base.py +0 -243
  88. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  89. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  90. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  91. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  92. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  93. autogen/agentchat/contrib/web_surfer.py +0 -305
  94. autogen/agentchat/conversable_agent.py +0 -2908
  95. autogen/agentchat/groupchat.py +0 -1668
  96. autogen/agentchat/user_proxy_agent.py +0 -109
  97. autogen/agentchat/utils.py +0 -207
  98. autogen/browser_utils.py +0 -291
  99. autogen/cache/__init__.py +0 -10
  100. autogen/cache/abstract_cache_base.py +0 -78
  101. autogen/cache/cache.py +0 -182
  102. autogen/cache/cache_factory.py +0 -85
  103. autogen/cache/cosmos_db_cache.py +0 -150
  104. autogen/cache/disk_cache.py +0 -109
  105. autogen/cache/in_memory_cache.py +0 -61
  106. autogen/cache/redis_cache.py +0 -128
  107. autogen/code_utils.py +0 -745
  108. autogen/coding/__init__.py +0 -22
  109. autogen/coding/base.py +0 -113
  110. autogen/coding/docker_commandline_code_executor.py +0 -262
  111. autogen/coding/factory.py +0 -45
  112. autogen/coding/func_with_reqs.py +0 -203
  113. autogen/coding/jupyter/__init__.py +0 -22
  114. autogen/coding/jupyter/base.py +0 -32
  115. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  116. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  117. autogen/coding/jupyter/jupyter_client.py +0 -224
  118. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  119. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  120. autogen/coding/local_commandline_code_executor.py +0 -410
  121. autogen/coding/markdown_code_extractor.py +0 -44
  122. autogen/coding/utils.py +0 -57
  123. autogen/exception_utils.py +0 -46
  124. autogen/extensions/__init__.py +0 -0
  125. autogen/formatting_utils.py +0 -76
  126. autogen/function_utils.py +0 -362
  127. autogen/graph_utils.py +0 -148
  128. autogen/io/__init__.py +0 -15
  129. autogen/io/base.py +0 -105
  130. autogen/io/console.py +0 -43
  131. autogen/io/websockets.py +0 -213
  132. autogen/logger/__init__.py +0 -11
  133. autogen/logger/base_logger.py +0 -140
  134. autogen/logger/file_logger.py +0 -287
  135. autogen/logger/logger_factory.py +0 -29
  136. autogen/logger/logger_utils.py +0 -42
  137. autogen/logger/sqlite_logger.py +0 -459
  138. autogen/math_utils.py +0 -356
  139. autogen/oai/__init__.py +0 -33
  140. autogen/oai/anthropic.py +0 -428
  141. autogen/oai/bedrock.py +0 -606
  142. autogen/oai/cerebras.py +0 -270
  143. autogen/oai/client.py +0 -1148
  144. autogen/oai/client_utils.py +0 -167
  145. autogen/oai/cohere.py +0 -453
  146. autogen/oai/completion.py +0 -1216
  147. autogen/oai/gemini.py +0 -469
  148. autogen/oai/groq.py +0 -281
  149. autogen/oai/mistral.py +0 -279
  150. autogen/oai/ollama.py +0 -582
  151. autogen/oai/openai_utils.py +0 -811
  152. autogen/oai/together.py +0 -343
  153. autogen/retrieve_utils.py +0 -487
  154. autogen/runtime_logging.py +0 -163
  155. autogen/token_count_utils.py +0 -259
  156. autogen/types.py +0 -20
  157. autogen/version.py +0 -7
  158. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/LICENSE +0 -0
  159. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/NOTICE.md +0 -0
  160. {ag2-0.4.1.dist-info → ag2-0.5.0.dist-info}/WHEEL +0 -0
@@ -1,705 +0,0 @@
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
- #
- # SPDX-License-Identifier: Apache-2.0
- #
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
- # SPDX-License-Identifier: MIT
- import hashlib
- import os
- import re
- import uuid
- from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
-
- from IPython import get_ipython
-
- try:
-     import chromadb
- except ImportError as e:
-     raise ImportError(f"{e}. You can try `pip install autogen[retrievechat]`, or install `chromadb` manually.")
- from autogen.agentchat import UserProxyAgent
- from autogen.agentchat.agent import Agent
- from autogen.agentchat.contrib.vectordb.base import Document, QueryResults, VectorDB, VectorDBFactory
- from autogen.agentchat.contrib.vectordb.utils import (
-     chroma_results_to_query_results,
-     filter_results_by_distance,
-     get_logger,
- )
- from autogen.code_utils import extract_code
- from autogen.retrieve_utils import (
-     TEXT_FORMATS,
-     create_vector_db_from_dir,
-     get_files_from_dir,
-     query_vector_db,
-     split_files_to_chunks,
- )
- from autogen.token_count_utils import count_token
-
- from ...formatting_utils import colored
-
- logger = get_logger(__name__)
-
- PROMPT_DEFAULT = """You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the
- context provided by the user. You should follow the following steps to answer a question:
- Step 1, you estimate the user's intent based on the question and context. The intent can be a code generation task or
- a question answering task.
- Step 2, you reply based on the intent.
- If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
- If user's intent is code generation, you must obey the following rules:
- Rule 1. You MUST NOT install any packages because all the packages needed are already installed.
- Rule 2. You must follow the formats below to write your code:
- ```language
- # your code
- ```
-
- If user's intent is question answering, you must give as short an answer as possible.
-
- User's question is: {input_question}
-
- Context is: {input_context}
-
- The source of the context is: {input_sources}
-
- If you can answer the question, in the end of your answer, add the source of the context in the format of `Sources: source1, source2, ...`.
- """
-
- PROMPT_CODE = """You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the
- context provided by the user.
- If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
- For code generation, you must obey the following rules:
- Rule 1. You MUST NOT install any packages because all the packages needed are already installed.
- Rule 2. You must follow the formats below to write your code:
- ```language
- # your code
- ```
-
- User's question is: {input_question}
-
- Context is: {input_context}
- """
-
- PROMPT_QA = """You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the
- context provided by the user.
- If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
- You must give as short an answer as possible.
-
- User's question is: {input_question}
-
- Context is: {input_context}
- """
-
- HASH_LENGTH = int(os.environ.get("HASH_LENGTH", 8))
- UPDATE_CONTEXT_IN_PROMPT = "you should reply exactly `UPDATE CONTEXT`"
-
-
- class RetrieveUserProxyAgent(UserProxyAgent):
-     """(In preview) The Retrieval-Augmented User Proxy retrieves document chunks based on the embedding
-     similarity, and sends them along with the question to the Retrieval-Augmented Assistant.
-     """
-
-     def __init__(
-         self,
-         name="RetrieveChatAgent",  # default set to RetrieveChatAgent
-         human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "ALWAYS",
-         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
-         retrieve_config: Optional[Dict] = None,  # config for the retrieve agent
-         **kwargs,
-     ):
-         r"""
-         Args:
-             name (str): name of the agent.
-
-             human_input_mode (str): whether to ask for human inputs every time a message is received.
-                 Possible values are "ALWAYS", "TERMINATE", "NEVER".
-                 1. When "ALWAYS", the agent prompts for human input every time a message is received.
-                     Under this mode, the conversation stops when the human input is "exit",
-                     or when is_termination_msg is True and there is no human input.
-                 2. When "TERMINATE", the agent prompts for human input only when a termination
-                     message is received or the number of auto replies reaches
-                     the max_consecutive_auto_reply.
-                 3. When "NEVER", the agent will never prompt for human input. Under this mode, the
-                     conversation stops when the number of auto replies reaches the
-                     max_consecutive_auto_reply or when is_termination_msg is True.
-
-             is_termination_msg (function): a function that takes a message in the form of a dictionary
-                 and returns a boolean value indicating if this received message is a termination message.
-                 The dict can contain the following keys: "content", "role", "name", "function_call".
-
-             retrieve_config (dict or None): config for the retrieve agent.
-
-                 To use default config, set to None. Otherwise, set to a dictionary with the
-                 following keys:
-                 - `task` (Optional, str) - the task of the retrieve chat. Possible values are
-                     "code", "qa" and "default". System prompt will be different for different tasks.
-                     The default value is `default`, which supports both code and qa, and provides
-                     source information at the end of the response.
-                 - `vector_db` (Optional, Union[str, VectorDB]) - the vector db for the retrieve chat.
-                     If it's a string, it should be the type of the vector db, such as "chroma"; otherwise,
-                     it should be an instance of the VectorDB protocol. Default is "chroma".
-                     Set `None` to use the deprecated `client`.
-                 - `db_config` (Optional, Dict) - the config for the vector db. Default is `{}`. Please make
-                     sure you understand the config for the vector db you are using, otherwise, leave it as `{}`.
-                     Only valid when `vector_db` is a string.
-                 - `client` (Optional, chromadb.Client) - the chromadb client. If key not provided, a
-                     default client `chromadb.Client()` will be used. If you want to use other
-                     vector db, extend this class and override the `retrieve_docs` function.
-                     *[Deprecated]* use `vector_db` instead.
-                 - `docs_path` (Optional, Union[str, List[str]]) - the path to the docs directory. It
-                     can also be the path to a single file, the url to a single file or a list
-                     of directories, files and urls. Default is None, which works only if the
-                     collection is already created.
-                 - `extra_docs` (Optional, bool) - when True, the system assigns unique IDs starting from
-                     "length+i" for new document chunks, allowing documents to be added without
-                     overwriting existing ones and facilitating the addition of more content to the
-                     collection. By default, "extra_docs" is False and document IDs start from zero,
-                     which risks new documents overwriting existing ones, potentially causing
-                     unintended loss or alteration of data in the collection.
-                     *[Deprecated]* use `new_docs` when using `vector_db` instead of `client`.
-                 - `new_docs` (Optional, bool) - when True, only adds new documents to the collection;
-                     when False, updates existing documents and adds new ones. Default is True.
-                     Document id is used to determine if a document is new or existing. By default, the
-                     id is the hash value of the content.
-                 - `model` (Optional, str) - the model to use for the retrieve chat.
-                     If key not provided, a default model `gpt-4` will be used.
-                 - `chunk_token_size` (Optional, int) - the chunk token size for the retrieve chat.
-                     If key not provided, a default size `max_tokens * 0.4` will be used.
-                 - `context_max_tokens` (Optional, int) - the context max token size for the
-                     retrieve chat.
-                     If key not provided, a default size `max_tokens * 0.8` will be used.
-                 - `chunk_mode` (Optional, str) - the chunk mode for the retrieve chat. Possible values
-                     are "multi_lines" and "one_line". If key not provided, a default mode
-                     `multi_lines` will be used.
-                 - `must_break_at_empty_line` (Optional, bool) - chunk will only break at empty line
-                     if True. Default is True.
-                     If chunk_mode is "one_line", this parameter will be ignored.
-                 - `embedding_model` (Optional, str) - the embedding model to use for the retrieve chat.
-                     If key not provided, a default model `all-MiniLM-L6-v2` will be used. All available
-                     models can be found at `https://www.sbert.net/docs/pretrained_models.html`.
-                     The default model is a fast model. If you want to use a high performance model,
-                     `all-mpnet-base-v2` is recommended.
-                     *[Deprecated]* not needed when using `vector_db` instead of `client`.
-                 - `embedding_function` (Optional, Callable) - the embedding function for creating the
-                     vector db. Default is None; SentenceTransformer with the given `embedding_model`
-                     will be used. If you want to use OpenAI, Cohere, HuggingFace or other embedding
-                     functions, you can pass it here;
-                     follow the examples in `https://docs.trychroma.com/embeddings`.
-                 - `customized_prompt` (Optional, str) - the customized prompt for the retrieve chat.
-                     Default is None.
-                 - `customized_answer_prefix` (Optional, str) - the customized answer prefix for the
-                     retrieve chat. Default is "".
-                     If not "" and the customized_answer_prefix is not in the answer,
-                     `Update Context` will be triggered.
-                 - `update_context` (Optional, bool) - if False, will not apply `Update Context` for
-                     interactive retrieval. Default is True.
-                 - `collection_name` (Optional, str) - the name of the collection.
-                     If key not provided, a default name `autogen-docs` will be used.
-                 - `get_or_create` (Optional, bool) - Whether to get the collection if it exists. Default is False.
-                 - `overwrite` (Optional, bool) - Whether to overwrite the collection if it exists. Default is False.
-                     Case 1. If the collection does not exist, create the collection.
-                     Case 2. If the collection exists and overwrite is True, it will overwrite the collection.
-                     Case 3. If the collection exists and overwrite is False: if get_or_create is True, it will get the
-                     collection, otherwise it raises a ValueError.
-                 - `custom_token_count_function` (Optional, Callable) - a custom function to count the
-                     number of tokens in a string.
-                     The function should take (text: str, model: str) as input and return the
-                     token count (int). The retrieve_config["model"] will be passed to the function.
-                     Default is autogen.token_count_utils.count_token, which uses tiktoken and may
-                     not be accurate for non-OpenAI models.
-                 - `custom_text_split_function` (Optional, Callable) - a custom function to split a
-                     string into a list of strings.
-                     Default is None, which will use the default function in
-                     `autogen.retrieve_utils.split_text_to_chunks`.
-                 - `custom_text_types` (Optional, List[str]) - a list of file types to be processed.
-                     Default is `autogen.retrieve_utils.TEXT_FORMATS`.
-                     This only applies to files under the directories in `docs_path`. Explicitly
-                     included files and urls will be chunked regardless of their types.
-                 - `recursive` (Optional, bool) - whether to search documents recursively in the
-                     docs_path. Default is True.
-                 - `distance_threshold` (Optional, float) - the threshold for the distance score; only
-                     distances smaller than it will be returned. Will be ignored if < 0. Default is -1.
-
-             `**kwargs` (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
-
-         Example:
-
-             Example of overriding retrieve_docs - If you have set up a customized vector db, and it's
-             not compatible with chromadb, you can easily plug it in with the code below.
-             *[Deprecated]* use `vector_db` instead. You can extend VectorDB and pass it to the agent.
-             ```python
-             class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent):
-                 def query_vector_db(
-                     self,
-                     query_texts: List[str],
-                     n_results: int = 10,
-                     search_string: str = "",
-                     **kwargs,
-                 ) -> Dict[str, Union[List[str], List[List[str]]]]:
-                     # define your own query function here
-                     pass
-
-                 def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = "", **kwargs):
-                     results = self.query_vector_db(
-                         query_texts=[problem],
-                         n_results=n_results,
-                         search_string=search_string,
-                         **kwargs,
-                     )
-
-                     self._results = results
-                     print("doc_ids: ", results["ids"])
-             ```
-         """
-         super().__init__(
-             name=name,
-             human_input_mode=human_input_mode,
-             **kwargs,
-         )
-
-         self._retrieve_config = {} if retrieve_config is None else retrieve_config
-         self._task = self._retrieve_config.get("task", "default")
-         self._vector_db = self._retrieve_config.get("vector_db", "chroma")
-         self._db_config = self._retrieve_config.get("db_config", {})
-         self._docs_path = self._retrieve_config.get("docs_path", None)
-         self._extra_docs = self._retrieve_config.get("extra_docs", False)
-         self._new_docs = self._retrieve_config.get("new_docs", True)
-         self._collection_name = self._retrieve_config.get("collection_name", "autogen-docs")
-         if "docs_path" not in self._retrieve_config:
-             logger.warning(
-                 "docs_path is not provided in retrieve_config. "
-                 f"Will raise ValueError if the collection `{self._collection_name}` doesn't exist. "
-                 "Set docs_path to None to suppress this warning."
-             )
-         self._model = self._retrieve_config.get("model", "gpt-4")
-         self._max_tokens = self.get_max_tokens(self._model)
-         self._chunk_token_size = int(self._retrieve_config.get("chunk_token_size", self._max_tokens * 0.4))
-         self._chunk_mode = self._retrieve_config.get("chunk_mode", "multi_lines")
-         self._must_break_at_empty_line = self._retrieve_config.get("must_break_at_empty_line", True)
-         self._embedding_model = self._retrieve_config.get("embedding_model", "all-MiniLM-L6-v2")
-         self._embedding_function = self._retrieve_config.get("embedding_function", None)
-         self.customized_prompt = self._retrieve_config.get("customized_prompt", None)
-         self.customized_answer_prefix = self._retrieve_config.get("customized_answer_prefix", "").upper()
-         self.update_context = self._retrieve_config.get("update_context", True)
-         self._get_or_create = self._retrieve_config.get("get_or_create", False) if self._docs_path is not None else True
-         self._overwrite = self._retrieve_config.get("overwrite", False)
-         self.custom_token_count_function = self._retrieve_config.get("custom_token_count_function", count_token)
-         self.custom_text_split_function = self._retrieve_config.get("custom_text_split_function", None)
-         self._custom_text_types = self._retrieve_config.get("custom_text_types", TEXT_FORMATS)
-         self._recursive = self._retrieve_config.get("recursive", True)
-         self._context_max_tokens = self._retrieve_config.get("context_max_tokens", self._max_tokens * 0.8)
-         self._collection = True if self._docs_path is None else False  # whether the collection is created
-         self._ipython = get_ipython()
-         self._doc_idx = -1  # the index of the current used doc
-         self._results = []  # the results of the current query
-         self._intermediate_answers = set()  # the intermediate answers
-         self._doc_contents = []  # the contents of the current used doc
-         self._doc_ids = []  # the ids of the current used doc
-         self._current_docs_in_context = []  # the ids of the current context sources
-         self._search_string = ""  # the search string used in the current query
-         self._distance_threshold = self._retrieve_config.get("distance_threshold", -1)
-         # update the termination message function
-         self._is_termination_msg = (
-             self._is_termination_msg_retrievechat if is_termination_msg is None else is_termination_msg
-         )
-         if isinstance(self._vector_db, str):
-             if not isinstance(self._db_config, dict):
-                 raise ValueError("`db_config` should be a dictionary.")
-             if "embedding_function" in self._retrieve_config:
-                 self._db_config["embedding_function"] = self._embedding_function
-             self._vector_db = VectorDBFactory.create_vector_db(db_type=self._vector_db, **self._db_config)
-         self._client = self._retrieve_config.get("client", None)
-         if self._client is None and hasattr(self._vector_db, "client"):
-             # Since the client arg is deprecated, let's check
-             # if the `vector_db` instance has a 'client' attribute.
-             self._client = getattr(self._vector_db, "client", None)
-         if self._client is None:
-             self._client = chromadb.Client()
-         self.register_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply, position=2)
-         self.register_hook(
-             hookable_method="process_message_before_send",
-             hook=self._check_update_context_before_send,
-         )
-
-     def _init_db(self):
-         if not self._vector_db:
-             return
-
-         IS_TO_CHUNK = False  # whether to chunk the raw files
-         if self._new_docs:
-             IS_TO_CHUNK = True
-         if not self._docs_path:
-             try:
-                 self._vector_db.get_collection(self._collection_name)
-                 logger.warning(f"`docs_path` is not provided. Use the existing collection `{self._collection_name}`.")
-                 self._overwrite = False
-                 self._get_or_create = True
-                 IS_TO_CHUNK = False
-             except ValueError:
-                 raise ValueError(
-                     "`docs_path` is not provided. "
-                     f"The collection `{self._collection_name}` doesn't exist either. "
-                     "Please provide `docs_path` or create the collection first."
-                 )
-         elif self._get_or_create and not self._overwrite:
-             try:
-                 self._vector_db.get_collection(self._collection_name)
-                 logger.info(f"Use the existing collection `{self._collection_name}`.", color="green")
-             except ValueError:
-                 IS_TO_CHUNK = True
-         else:
-             IS_TO_CHUNK = True
-
-         self._vector_db.active_collection = self._vector_db.create_collection(
-             self._collection_name, overwrite=self._overwrite, get_or_create=self._get_or_create
-         )
-
-         docs = None
-         if IS_TO_CHUNK:
-             if self.custom_text_split_function is not None:
-                 chunks, sources = split_files_to_chunks(
-                     get_files_from_dir(self._docs_path, self._custom_text_types, self._recursive),
-                     custom_text_split_function=self.custom_text_split_function,
-                 )
-             else:
-                 chunks, sources = split_files_to_chunks(
-                     get_files_from_dir(self._docs_path, self._custom_text_types, self._recursive),
-                     self._chunk_token_size,
-                     self._chunk_mode,
-                     self._must_break_at_empty_line,
-                 )
-             logger.info(f"Found {len(chunks)} chunks.")
-
-             if self._new_docs:
-                 all_docs_ids = set(
-                     [
-                         doc["id"]
-                         for doc in self._vector_db.get_docs_by_ids(ids=None, collection_name=self._collection_name)
-                     ]
-                 )
-             else:
-                 all_docs_ids = set()
-
-             chunk_ids = (
-                 [hashlib.blake2b(chunk.encode("utf-8")).hexdigest()[:HASH_LENGTH] for chunk in chunks]
-                 if not self._vector_db.type == "qdrant"
-                 else [str(uuid.UUID(hex=hashlib.md5(chunk.encode("utf-8")).hexdigest())) for chunk in chunks]
-             )
-             chunk_ids_set = set(chunk_ids)
-             chunk_ids_set_idx = [chunk_ids.index(hash_value) for hash_value in chunk_ids_set]
-             docs = [
-                 Document(id=chunk_ids[idx], content=chunks[idx], metadata=sources[idx])
-                 for idx in chunk_ids_set_idx
-                 if chunk_ids[idx] not in all_docs_ids
-             ]
-
-         self._vector_db.insert_docs(docs=docs, collection_name=self._collection_name, upsert=True)
-
-     def _is_termination_msg_retrievechat(self, message):
-         """Check if a message is a termination message.
-         For code generation, terminate when no code block is detected. Currently only detects python code blocks.
-         For question answering, terminate when the context is not updated, i.e., an answer is given.
-         """
-         if isinstance(message, dict):
-             message = message.get("content")
-             if message is None:
-                 return False
-         cb = extract_code(message)
-         contain_code = False
-         for c in cb:
-             # todo: support more languages
-             if c[0] == "python":
-                 contain_code = True
-                 break
-         update_context_case1, update_context_case2 = self._check_update_context(message)
-         return not (contain_code or update_context_case1 or update_context_case2)
-
-     def _check_update_context_before_send(self, sender, message, recipient, silent):
-         if not isinstance(message, (str, dict)):
-             return message
-         elif isinstance(message, dict):
-             msg_text = message.get("content", message)
-         else:
-             msg_text = message
-
-         if "UPDATE CONTEXT" == msg_text.strip().upper():
-             doc_contents = self._get_context(self._results)
-
-             # Always use self.problem as the query text to retrieve docs, but each time we replace the context with the
-             # next similar docs in the retrieved doc results.
-             if not doc_contents:
-                 for _tmp_retrieve_count in range(1, 5):
-                     self._reset(intermediate=True)
-                     self.retrieve_docs(
-                         self.problem, self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
-                     )
-                     doc_contents = self._get_context(self._results)
-                     if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
-                         break
-             msg_text = self._generate_message(doc_contents, task=self._task)
-
-         if isinstance(message, dict):
-             message["content"] = msg_text
-         return message
-
-     @staticmethod
-     def get_max_tokens(model="gpt-3.5-turbo"):
-         if "32k" in model:
-             return 32000
-         elif "16k" in model:
-             return 16000
-         elif "gpt-4" in model:
-             return 8000
-         else:
-             return 4000
-
-     def _reset(self, intermediate=False):
-         self._doc_idx = -1  # the index of the current used doc
-         self._results = []  # the results of the current query
-         if not intermediate:
-             self._intermediate_answers = set()  # the intermediate answers
-             self._doc_contents = []  # the contents of the current used doc
-             self._doc_ids = []  # the ids of the current used doc
-
-     def _get_context(self, results: QueryResults):
-         doc_contents = ""
-         self._current_docs_in_context = []
-         current_tokens = 0
-         _doc_idx = self._doc_idx
-         _tmp_retrieve_count = 0
-         for idx, doc in enumerate(results[0]):
-             doc = doc[0]
-             if idx <= _doc_idx:
-                 continue
-             if doc["id"] in self._doc_ids:
-                 continue
-             _doc_tokens = self.custom_token_count_function(doc["content"], self._model)
-             if _doc_tokens > self._context_max_tokens:
-                 func_print = f"Skip doc_id {doc['id']} as it is too long to fit in the context."
-                 print(colored(func_print, "green"), flush=True)
-                 self._doc_idx = idx
-                 continue
-             if current_tokens + _doc_tokens > self._context_max_tokens:
-                 break
-             func_print = f"Adding content of doc {doc['id']} to context."
-             print(colored(func_print, "green"), flush=True)
-             current_tokens += _doc_tokens
-             doc_contents += doc["content"] + "\n"
-             _metadata = doc.get("metadata")
-             if isinstance(_metadata, dict):
-                 self._current_docs_in_context.append(_metadata.get("source", ""))
-             self._doc_idx = idx
-             self._doc_ids.append(doc["id"])
-             self._doc_contents.append(doc["content"])
-             _tmp_retrieve_count += 1
-             if _tmp_retrieve_count >= self.n_results:
-                 break
-         return doc_contents
-
-     def _generate_message(self, doc_contents, task="default"):
-         if not doc_contents:
-             print(colored("No more context, will terminate.", "green"), flush=True)
-             return "TERMINATE"
-         if self.customized_prompt:
-             message = self.customized_prompt.format(input_question=self.problem, input_context=doc_contents)
-         elif task.upper() == "CODE":
-             message = PROMPT_CODE.format(input_question=self.problem, input_context=doc_contents)
-         elif task.upper() == "QA":
-             message = PROMPT_QA.format(input_question=self.problem, input_context=doc_contents)
-         elif task.upper() == "DEFAULT":
-             message = PROMPT_DEFAULT.format(
-                 input_question=self.problem, input_context=doc_contents, input_sources=self._current_docs_in_context
-             )
-         else:
-             raise NotImplementedError(f"task {task} is not implemented.")
-         return message
-
-     def _check_update_context(self, message):
-         if isinstance(message, dict):
-             message = message.get("content", "")
-         elif not isinstance(message, str):
-             message = ""
-         update_context_case1 = "UPDATE CONTEXT" in message.upper() and UPDATE_CONTEXT_IN_PROMPT not in message
-         update_context_case2 = self.customized_answer_prefix and self.customized_answer_prefix not in message.upper()
-         return update_context_case1, update_context_case2
-
-     def _generate_retrieve_user_reply(
-         self,
-         messages: Optional[List[Dict]] = None,
-         sender: Optional[Agent] = None,
-         config: Optional[Any] = None,
-     ) -> Tuple[bool, Union[str, Dict, None]]:
-         """In this function, we will update the context and reset the conversation based on different conditions.
-         We'll update the context and reset the conversation if update_context is True and either of the following:
-         (1) the last message contains "UPDATE CONTEXT",
-         (2) the last message doesn't contain "UPDATE CONTEXT" and the customized_answer_prefix is not in the message.
-         """
-         if config is None:
-             config = self
-         if messages is None:
-             messages = self._oai_messages[sender]
-         message = messages[-1]
-         update_context_case1, update_context_case2 = self._check_update_context(message)
-         if (update_context_case1 or update_context_case2) and self.update_context:
-             print(colored("Updating context and resetting conversation.", "green"), flush=True)
-             # extract the first sentence in the response as the intermediate answer
-             _message = message.get("content", "").split("\n")[0].strip()
-             _intermediate_info = re.split(r"(?<=[.!?])\s+", _message)
-             self._intermediate_answers.add(_intermediate_info[0])
-
-             if update_context_case1:
-                 # try to get more context from the current retrieved doc results because the results may be too long to fit
-                 # in the LLM context.
-                 doc_contents = self._get_context(self._results)
-
-                 # Always use self.problem as the query text to retrieve docs, but each time we replace the context with the
-                 # next similar docs in the retrieved doc results.
-                 if not doc_contents:
-                     for _tmp_retrieve_count in range(1, 5):
-                         self._reset(intermediate=True)
-                         self.retrieve_docs(
-                             self.problem, self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
-                         )
-                         doc_contents = self._get_context(self._results)
-                         if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
-                             break
-             elif update_context_case2:
-                 # Use the current intermediate info as the query text to retrieve docs, and each time we append the top similar
-                 # docs in the retrieved doc results to the context.
-                 for _tmp_retrieve_count in range(5):
-                     self._reset(intermediate=True)
-                     self.retrieve_docs(
-                         _intermediate_info[0], self.n_results * (2 * _tmp_retrieve_count + 1), self._search_string
-                     )
-                     self._get_context(self._results)
-                     doc_contents = "\n".join(self._doc_contents)  # + "\n" + "\n".join(self._intermediate_answers)
-                     if doc_contents or self.n_results * (2 * _tmp_retrieve_count + 1) >= len(self._results[0]):
-                         break
-
-             self.clear_history()
-             sender.clear_history()
-             return True, self._generate_message(doc_contents, task=self._task)
-         else:
-             return False, None
-
-     def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""):
-         """Retrieve docs based on the given problem and assign the results to the class property `_results`.
-         The retrieved docs should be of type `QueryResults`, which is a list of tuples containing the document and
-         the distance.
-
-         Args:
-             problem (str): the problem to be solved.
-             n_results (int): the number of results to be retrieved. Default is 20.
-             search_string (str): only docs that contain an exact match of this string will be retrieved. Default is "".
-                 Not used if the vector_db doesn't support it.
-
-         Returns:
-             None.
-         """
-         if isinstance(self._vector_db, VectorDB):
-             if not self._collection or not self._get_or_create:
-                 print("Trying to create collection.")
-                 self._init_db()
-                 self._collection = True
-                 self._get_or_create = True
-
-             kwargs = {}
-             if hasattr(self._vector_db, "type") and self._vector_db.type == "chroma":
-                 kwargs["where_document"] = {"$contains": search_string} if search_string else None
-             results = self._vector_db.retrieve_docs(
-                 queries=[problem],
-                 n_results=n_results,
-                 collection_name=self._collection_name,
-                 distance_threshold=self._distance_threshold,
-                 **kwargs,
-             )
-             self._search_string = search_string
-             self._results = results
-             print("VectorDB returns doc_ids: ", [[r[0]["id"] for r in rr] for rr in results])
-             return
-
-         if not self._collection or not self._get_or_create:
-             print("Trying to create collection.")
-             self._client = create_vector_db_from_dir(
-                 dir_path=self._docs_path,
-                 max_tokens=self._chunk_token_size,
-                 client=self._client,
-                 collection_name=self._collection_name,
-                 chunk_mode=self._chunk_mode,
-                 must_break_at_empty_line=self._must_break_at_empty_line,
-                 embedding_model=self._embedding_model,
-                 get_or_create=self._get_or_create,
-                 embedding_function=self._embedding_function,
-                 custom_text_split_function=self.custom_text_split_function,
-                 custom_text_types=self._custom_text_types,
-                 recursive=self._recursive,
-                 extra_docs=self._extra_docs,
-             )
-             self._collection = True
-             self._get_or_create = True
-
-         results = query_vector_db(
-             query_texts=[problem],
-             n_results=n_results,
-             search_string=search_string,
-             client=self._client,
-             collection_name=self._collection_name,
-             embedding_model=self._embedding_model,
-             embedding_function=self._embedding_function,
-         )
-         results["contents"] = results.pop("documents")
-         results = chroma_results_to_query_results(results, "distances")
-         results = filter_results_by_distance(results, self._distance_threshold)
-
-         self._search_string = search_string
-         self._results = results
-         print("doc_ids: ", [[r[0]["id"] for r in rr] for rr in results])
-
-     @staticmethod
-     def message_generator(sender, recipient, context):
-         """
-         Generate an initial message with the given context for the RetrieveUserProxyAgent.
-         Args:
-             sender (Agent): the sender agent. It should be an instance of RetrieveUserProxyAgent.
-             recipient (Agent): the recipient agent. Usually it's the assistant agent.
-             context (dict): the context for the message generation. It should contain the following keys:
-                 - `problem` (str) - the problem to be solved.
-                 - `n_results` (int) - the number of results to be retrieved. Default is 20.
-                 - `search_string` (str) - only docs that contain an exact match of this string will be retrieved. Default is "".
-         Returns:
-             str: the generated message ready to be sent to the recipient agent.
-         """
-         sender._reset()
-
-         problem = context.get("problem", "")
-         n_results = context.get("n_results", 20)
-         search_string = context.get("search_string", "")
-
-         sender.retrieve_docs(problem, n_results, search_string)
-         sender.problem = problem
-         sender.n_results = n_results
-         doc_contents = sender._get_context(sender._results)
-         message = sender._generate_message(doc_contents, sender._task)
-         return message
-
-     def run_code(self, code, **kwargs):
-         lang = kwargs.get("lang", None)
-         if code.startswith("!") or code.startswith("pip") or lang in ["bash", "shell", "sh"]:
-             return (
-                 0,
-                 "You MUST NOT install any packages because all the packages needed are already installed.",
-                 None,
-             )
-         if self._ipython is None or lang != "python":
-             return super().run_code(code, **kwargs)
-         else:
-             result = self._ipython.run_cell(code)
-             log = str(result.result)
-             exitcode = 0 if result.success else 1
-             if result.error_before_exec is not None:
-                 log += f"\n{result.error_before_exec}"
-                 exitcode = 1
-             if result.error_in_exec is not None:
-                 log += f"\n{result.error_in_exec}"
-                 exitcode = 1
-             return exitcode, log, None
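
For orientation: the hunk above deletes `autogen/agentchat/contrib/retrieve_user_proxy_agent.py` in its entirety. Below is a minimal usage sketch of how this class was typically wired up under ag2 0.4.1, going by the `retrieve_config` keys and the `message_generator` helper documented in the removed docstrings. It assumes the `retrievechat` extra is installed; the docs path, model name, and API key are illustrative placeholders, not values taken from this diff.

```python
# Minimal usage sketch for the removed RetrieveUserProxyAgent (ag2 0.4.1).
# Assumes `pip install "ag2[retrievechat]==0.4.1"`; paths and keys are placeholders.
from autogen import AssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

llm_config = {"config_list": [{"model": "gpt-4", "api_key": "<your-key>"}]}

assistant = AssistantAgent(name="assistant", llm_config=llm_config)

ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    retrieve_config={
        "task": "qa",                       # "code", "qa", or "default" (see docstring above)
        "docs_path": "./docs",              # illustrative; a directory, file, URL, or list of them
        "chunk_token_size": 2000,           # defaults to max_tokens * 0.4 when omitted
        "collection_name": "autogen-docs",  # the documented default collection name
        "get_or_create": True,              # reuse the collection if it already exists
    },
)

# message_generator retrieves context for `problem` and builds the initial prompt;
# a reply of exactly "UPDATE CONTEXT" triggers the context-refresh loop shown above.
ragproxyagent.initiate_chat(
    assistant,
    message=ragproxyagent.message_generator,
    problem="How does UPDATE CONTEXT work in RetrieveChat?",
)
```

Since every `autogen/*` module is removed from the wheel in this release, code like the above needs to pin `ag2==0.4.1` or obtain the `autogen` package from a distribution that still ships it.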