ag2 0.4.1__py3-none-any.whl → 0.5.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic. Click here for more details.

Files changed (160) hide show
  1. {ag2-0.4.1.dist-info → ag2-0.5.0b2.dist-info}/METADATA +5 -146
  2. ag2-0.5.0b2.dist-info/RECORD +6 -0
  3. ag2-0.5.0b2.dist-info/top_level.txt +1 -0
  4. ag2-0.4.1.dist-info/RECORD +0 -158
  5. ag2-0.4.1.dist-info/top_level.txt +0 -1
  6. autogen/__init__.py +0 -17
  7. autogen/_pydantic.py +0 -116
  8. autogen/agentchat/__init__.py +0 -42
  9. autogen/agentchat/agent.py +0 -142
  10. autogen/agentchat/assistant_agent.py +0 -85
  11. autogen/agentchat/chat.py +0 -306
  12. autogen/agentchat/contrib/__init__.py +0 -0
  13. autogen/agentchat/contrib/agent_builder.py +0 -788
  14. autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
  15. autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
  16. autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
  17. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
  18. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
  19. autogen/agentchat/contrib/agent_eval/task.py +0 -43
  20. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  21. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  22. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  23. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  24. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  25. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  26. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  27. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  28. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  29. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  30. autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
  31. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
  32. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
  33. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
  34. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
  35. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
  36. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
  37. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
  38. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
  39. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
  40. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
  41. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
  42. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
  43. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
  44. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
  45. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
  49. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
  50. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
  51. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
  52. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
  53. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
  54. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
  55. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
  56. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
  57. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
  58. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
  59. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
  60. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
  61. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
  62. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
  63. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
  64. autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
  65. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
  66. autogen/agentchat/contrib/captainagent.py +0 -490
  67. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  68. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  69. autogen/agentchat/contrib/graph_rag/document.py +0 -30
  70. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
  71. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
  72. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
  73. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
  74. autogen/agentchat/contrib/img_utils.py +0 -390
  75. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  76. autogen/agentchat/contrib/llava_agent.py +0 -176
  77. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  78. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  79. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  80. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  81. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
  82. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  83. autogen/agentchat/contrib/swarm_agent.py +0 -463
  84. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  85. autogen/agentchat/contrib/tool_retriever.py +0 -120
  86. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  87. autogen/agentchat/contrib/vectordb/base.py +0 -243
  88. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  89. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  90. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  91. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  92. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  93. autogen/agentchat/contrib/web_surfer.py +0 -305
  94. autogen/agentchat/conversable_agent.py +0 -2908
  95. autogen/agentchat/groupchat.py +0 -1668
  96. autogen/agentchat/user_proxy_agent.py +0 -109
  97. autogen/agentchat/utils.py +0 -207
  98. autogen/browser_utils.py +0 -291
  99. autogen/cache/__init__.py +0 -10
  100. autogen/cache/abstract_cache_base.py +0 -78
  101. autogen/cache/cache.py +0 -182
  102. autogen/cache/cache_factory.py +0 -85
  103. autogen/cache/cosmos_db_cache.py +0 -150
  104. autogen/cache/disk_cache.py +0 -109
  105. autogen/cache/in_memory_cache.py +0 -61
  106. autogen/cache/redis_cache.py +0 -128
  107. autogen/code_utils.py +0 -745
  108. autogen/coding/__init__.py +0 -22
  109. autogen/coding/base.py +0 -113
  110. autogen/coding/docker_commandline_code_executor.py +0 -262
  111. autogen/coding/factory.py +0 -45
  112. autogen/coding/func_with_reqs.py +0 -203
  113. autogen/coding/jupyter/__init__.py +0 -22
  114. autogen/coding/jupyter/base.py +0 -32
  115. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  116. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  117. autogen/coding/jupyter/jupyter_client.py +0 -224
  118. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  119. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  120. autogen/coding/local_commandline_code_executor.py +0 -410
  121. autogen/coding/markdown_code_extractor.py +0 -44
  122. autogen/coding/utils.py +0 -57
  123. autogen/exception_utils.py +0 -46
  124. autogen/extensions/__init__.py +0 -0
  125. autogen/formatting_utils.py +0 -76
  126. autogen/function_utils.py +0 -362
  127. autogen/graph_utils.py +0 -148
  128. autogen/io/__init__.py +0 -15
  129. autogen/io/base.py +0 -105
  130. autogen/io/console.py +0 -43
  131. autogen/io/websockets.py +0 -213
  132. autogen/logger/__init__.py +0 -11
  133. autogen/logger/base_logger.py +0 -140
  134. autogen/logger/file_logger.py +0 -287
  135. autogen/logger/logger_factory.py +0 -29
  136. autogen/logger/logger_utils.py +0 -42
  137. autogen/logger/sqlite_logger.py +0 -459
  138. autogen/math_utils.py +0 -356
  139. autogen/oai/__init__.py +0 -33
  140. autogen/oai/anthropic.py +0 -428
  141. autogen/oai/bedrock.py +0 -606
  142. autogen/oai/cerebras.py +0 -270
  143. autogen/oai/client.py +0 -1148
  144. autogen/oai/client_utils.py +0 -167
  145. autogen/oai/cohere.py +0 -453
  146. autogen/oai/completion.py +0 -1216
  147. autogen/oai/gemini.py +0 -469
  148. autogen/oai/groq.py +0 -281
  149. autogen/oai/mistral.py +0 -279
  150. autogen/oai/ollama.py +0 -582
  151. autogen/oai/openai_utils.py +0 -811
  152. autogen/oai/together.py +0 -343
  153. autogen/retrieve_utils.py +0 -487
  154. autogen/runtime_logging.py +0 -163
  155. autogen/token_count_utils.py +0 -259
  156. autogen/types.py +0 -20
  157. autogen/version.py +0 -7
  158. {ag2-0.4.1.dist-info → ag2-0.5.0b2.dist-info}/LICENSE +0 -0
  159. {ag2-0.4.1.dist-info → ag2-0.5.0b2.dist-info}/NOTICE.md +0 -0
  160. {ag2-0.4.1.dist-info → ag2-0.5.0b2.dist-info}/WHEEL +0 -0
@@ -1,545 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- import copy
8
- import json
9
- import logging
10
- import time
11
- from collections import defaultdict
12
- from typing import Any, Dict, List, Optional, Tuple, Union
13
-
14
- from autogen import OpenAIWrapper
15
- from autogen.agentchat.agent import Agent
16
- from autogen.agentchat.assistant_agent import AssistantAgent, ConversableAgent
17
- from autogen.oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant
18
- from autogen.runtime_logging import log_new_agent, logging_enabled
19
-
20
- logger = logging.getLogger(__name__)
21
-
22
-
23
- class GPTAssistantAgent(ConversableAgent):
24
- """
25
- An experimental AutoGen agent class that leverages the OpenAI Assistant API for conversational capabilities.
26
- This agent is unique in its reliance on the OpenAI Assistant for state management, differing from other agents like ConversableAgent.
27
- """
28
-
29
- DEFAULT_MODEL_NAME = "gpt-4-0125-preview"
30
-
31
- def __init__(
32
- self,
33
- name="GPT Assistant",
34
- instructions: Optional[str] = None,
35
- llm_config: Optional[Union[Dict, bool]] = None,
36
- assistant_config: Optional[Dict] = None,
37
- overwrite_instructions: bool = False,
38
- overwrite_tools: bool = False,
39
- **kwargs,
40
- ):
41
- """
42
- Args:
43
- name (str): name of the agent. It will be used to find the existing assistant by name. Please remember to delete an old assistant with the same name if you intend to create a new assistant with the same name.
44
- instructions (str): instructions for the OpenAI assistant configuration.
45
- When instructions is not None, the system message of the agent will be
46
- set to the provided instructions and used in the assistant run, irrespective
47
- of the overwrite_instructions flag. But when instructions is None,
48
- and the assistant does not exist, the system message will be set to
49
- AssistantAgent.DEFAULT_SYSTEM_MESSAGE. If the assistant exists, the
50
- system message will be set to the existing assistant instructions.
51
- llm_config (dict or False): llm inference configuration.
52
- - model: Model to use for the assistant (gpt-4-1106-preview, gpt-3.5-turbo-1106).
53
- assistant_config
54
- - assistant_id: ID of the assistant to use. If None, a new assistant will be created.
55
- - check_every_ms: check thread run status interval
56
- - tools: Give Assistants access to OpenAI-hosted tools like Code Interpreter and Knowledge Retrieval,
57
- or build your own tools using Function calling. ref https://platform.openai.com/docs/assistants/tools
58
- - file_ids: (Deprecated) files used by retrieval in run. It is Deprecated, use tool_resources instead. https://platform.openai.com/docs/assistants/migration/what-has-changed.
59
- - tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool.
60
- overwrite_instructions (bool): whether to overwrite the instructions of an existing assistant. This parameter is in effect only when assistant_id is specified in llm_config.
61
- overwrite_tools (bool): whether to overwrite the tools of an existing assistant. This parameter is in effect only when assistant_id is specified in llm_config.
62
- kwargs (dict): Additional configuration options for the agent.
63
- - verbose (bool): If set to True, enables more detailed output from the assistant thread.
64
- - Other kwargs: Except verbose, others are passed directly to ConversableAgent.
65
- """
66
-
67
- self._verbose = kwargs.pop("verbose", False)
68
- openai_client_cfg, openai_assistant_cfg = self._process_assistant_config(llm_config, assistant_config)
69
-
70
- super().__init__(
71
- name=name, system_message=instructions, human_input_mode="NEVER", llm_config=openai_client_cfg, **kwargs
72
- )
73
- if logging_enabled():
74
- log_new_agent(self, locals())
75
-
76
- # GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list()
77
- # See: https://github.com/microsoft/autogen/pull/1721
78
- model_name = self.DEFAULT_MODEL_NAME
79
- if openai_client_cfg.get("config_list") is not None and len(openai_client_cfg["config_list"]) > 0:
80
- model_name = openai_client_cfg["config_list"][0].pop("model", self.DEFAULT_MODEL_NAME)
81
- else:
82
- model_name = openai_client_cfg.pop("model", self.DEFAULT_MODEL_NAME)
83
-
84
- logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name)
85
-
86
- oai_wrapper = OpenAIWrapper(**openai_client_cfg)
87
- if len(oai_wrapper._clients) > 1:
88
- logger.warning("GPT Assistant only supports one OpenAI client. Using the first client in the list.")
89
-
90
- self._openai_client = oai_wrapper._clients[0]._oai_client
91
- openai_assistant_id = openai_assistant_cfg.get("assistant_id", None)
92
- if openai_assistant_id is None:
93
- # try to find assistant by name first
94
- candidate_assistants = retrieve_assistants_by_name(self._openai_client, name)
95
- if len(candidate_assistants) > 0:
96
- # Filter out candidates with the same name but different instructions, file IDs, and function names.
97
- candidate_assistants = self.find_matching_assistant(
98
- candidate_assistants,
99
- instructions,
100
- openai_assistant_cfg.get("tools", []),
101
- )
102
-
103
- if len(candidate_assistants) == 0:
104
- logger.warning("No matching assistant found, creating a new assistant")
105
- # create a new assistant
106
- if instructions is None:
107
- logger.warning(
108
- "No instructions were provided for new assistant. Using default instructions from AssistantAgent.DEFAULT_SYSTEM_MESSAGE."
109
- )
110
- instructions = AssistantAgent.DEFAULT_SYSTEM_MESSAGE
111
- self._openai_assistant = create_gpt_assistant(
112
- self._openai_client,
113
- name=name,
114
- instructions=instructions,
115
- model=model_name,
116
- assistant_config=openai_assistant_cfg,
117
- )
118
- else:
119
- logger.warning(
120
- "Matching assistant found, using the first matching assistant: %s",
121
- candidate_assistants[0].__dict__,
122
- )
123
- self._openai_assistant = candidate_assistants[0]
124
- else:
125
- # retrieve an existing assistant
126
- self._openai_assistant = self._openai_client.beta.assistants.retrieve(openai_assistant_id)
127
- # if no instructions are provided, set the instructions to the existing instructions
128
- if instructions is None:
129
- logger.warning(
130
- "No instructions were provided for given assistant. Using existing instructions from assistant API."
131
- )
132
- instructions = self.get_assistant_instructions()
133
- elif overwrite_instructions is True:
134
- logger.warning(
135
- "overwrite_instructions is True. Provided instructions will be used and will modify the assistant in the API"
136
- )
137
- self._openai_assistant = update_gpt_assistant(
138
- self._openai_client,
139
- assistant_id=openai_assistant_id,
140
- assistant_config={
141
- "instructions": instructions,
142
- },
143
- )
144
- else:
145
- logger.warning(
146
- "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
147
- )
148
-
149
- # Check if tools are specified in assistant_config
150
- specified_tools = openai_assistant_cfg.get("tools", None)
151
-
152
- if specified_tools is None:
153
- # Check if the current assistant has tools defined
154
- if self._openai_assistant.tools:
155
- logger.warning(
156
- "No tools were provided for given assistant. Using existing tools from assistant API."
157
- )
158
- else:
159
- logger.info(
160
- "No tools were provided for the assistant, and the assistant currently has no tools set."
161
- )
162
- elif overwrite_tools is True:
163
- # Tools are specified and overwrite_tools is True; update the assistant's tools
164
- logger.warning(
165
- "overwrite_tools is True. Provided tools will be used and will modify the assistant in the API"
166
- )
167
- self._openai_assistant = update_gpt_assistant(
168
- self._openai_client,
169
- assistant_id=openai_assistant_id,
170
- assistant_config={
171
- "tools": specified_tools,
172
- "tool_resources": openai_assistant_cfg.get("tool_resources", None),
173
- },
174
- )
175
- else:
176
- # Tools are specified but overwrite_tools is False; do not update the assistant's tools
177
- logger.warning("overwrite_tools is False. Using existing tools from assistant API.")
178
-
179
- self.update_system_message(self._openai_assistant.instructions)
180
- # lazily create threads
181
- self._openai_threads = {}
182
- self._unread_index = defaultdict(int)
183
- self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant, position=2)
184
-
185
- def _invoke_assistant(
186
- self,
187
- messages: Optional[List[Dict]] = None,
188
- sender: Optional[Agent] = None,
189
- config: Optional[Any] = None,
190
- ) -> Tuple[bool, Union[str, Dict, None]]:
191
- """
192
- Invokes the OpenAI assistant to generate a reply based on the given messages.
193
-
194
- Args:
195
- messages: A list of messages in the conversation history with the sender.
196
- sender: The agent instance that sent the message.
197
- config: Optional configuration for message processing.
198
-
199
- Returns:
200
- A tuple containing a boolean indicating success and the assistant's reply.
201
- """
202
-
203
- if messages is None:
204
- messages = self._oai_messages[sender]
205
- unread_index = self._unread_index[sender] or 0
206
- pending_messages = messages[unread_index:]
207
-
208
- # Check and initiate a new thread if necessary
209
- if self._openai_threads.get(sender, None) is None:
210
- self._openai_threads[sender] = self._openai_client.beta.threads.create(
211
- messages=[],
212
- )
213
- assistant_thread = self._openai_threads[sender]
214
- # Process each unread message
215
- for message in pending_messages:
216
- if message["content"].strip() == "":
217
- continue
218
- # Convert message roles to 'user' or 'assistant', by calling _map_role_for_api, to comply with OpenAI API spec
219
- api_role = self._map_role_for_api(message["role"])
220
- self._openai_client.beta.threads.messages.create(
221
- thread_id=assistant_thread.id,
222
- content=message["content"],
223
- role=api_role,
224
- )
225
-
226
- # Create a new run to get responses from the assistant
227
- run = self._openai_client.beta.threads.runs.create(
228
- thread_id=assistant_thread.id,
229
- assistant_id=self._openai_assistant.id,
230
- # pass the latest system message as instructions
231
- instructions=self.system_message,
232
- )
233
-
234
- run_response_messages = self._get_run_response(assistant_thread, run)
235
- assert len(run_response_messages) > 0, "No response from the assistant."
236
-
237
- response = {
238
- "role": run_response_messages[-1]["role"],
239
- "content": "",
240
- }
241
- for message in run_response_messages:
242
- # just logging or do something with the intermediate messages?
243
- # if current response is not empty and there is more, append new lines
244
- if len(response["content"]) > 0:
245
- response["content"] += "\n\n"
246
- response["content"] += message["content"]
247
-
248
- self._unread_index[sender] = len(self._oai_messages[sender]) + 1
249
- return True, response
250
-
251
- def _map_role_for_api(self, role: str) -> str:
252
- """
253
- Maps internal message roles to the roles expected by the OpenAI Assistant API.
254
-
255
- Args:
256
- role (str): The role from the internal message.
257
-
258
- Returns:
259
- str: The mapped role suitable for the API.
260
- """
261
- if role in ["function", "tool"]:
262
- return "assistant"
263
- elif role == "system":
264
- return "system"
265
- elif role == "user":
266
- return "user"
267
- elif role == "assistant":
268
- return "assistant"
269
- else:
270
- # Default to 'assistant' for any other roles not recognized by the API
271
- return "assistant"
272
-
273
- def _get_run_response(self, thread, run):
274
- """
275
- Waits for and processes the response of a run from the OpenAI assistant.
276
-
277
- Args:
278
- run: The run object initiated with the OpenAI assistant.
279
-
280
- Returns:
281
- Updated run object, status of the run, and response messages.
282
- """
283
- while True:
284
- run = self._wait_for_run(run.id, thread.id)
285
- if run.status == "completed":
286
- response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc")
287
-
288
- new_messages = []
289
- for msg in response_messages:
290
- if msg.run_id == run.id:
291
- for content in msg.content:
292
- if content.type == "text":
293
- new_messages.append(
294
- {"role": msg.role, "content": self._format_assistant_message(content.text)}
295
- )
296
- elif content.type == "image_file":
297
- new_messages.append(
298
- {
299
- "role": msg.role,
300
- "content": f"Received file id={content.image_file.file_id}",
301
- }
302
- )
303
- return new_messages
304
- elif run.status == "requires_action":
305
- actions = []
306
- for tool_call in run.required_action.submit_tool_outputs.tool_calls:
307
- function = tool_call.function
308
- is_exec_success, tool_response = self.execute_function(function.dict(), self._verbose)
309
- tool_response["metadata"] = {
310
- "tool_call_id": tool_call.id,
311
- "run_id": run.id,
312
- "thread_id": thread.id,
313
- }
314
-
315
- logger.info(
316
- "Intermediate executing(%s, Success: %s) : %s",
317
- tool_response["name"],
318
- is_exec_success,
319
- tool_response["content"],
320
- )
321
- actions.append(tool_response)
322
-
323
- submit_tool_outputs = {
324
- "tool_outputs": [
325
- {"output": action["content"], "tool_call_id": action["metadata"]["tool_call_id"]}
326
- for action in actions
327
- ],
328
- "run_id": run.id,
329
- "thread_id": thread.id,
330
- }
331
-
332
- run = self._openai_client.beta.threads.runs.submit_tool_outputs(**submit_tool_outputs)
333
- else:
334
- run_info = json.dumps(run.dict(), indent=2)
335
- raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})")
336
-
337
- def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
338
- """
339
- Waits for a run to complete or reach a final state.
340
-
341
- Args:
342
- run_id: The ID of the run.
343
- thread_id: The ID of the thread associated with the run.
344
-
345
- Returns:
346
- The updated run object after completion or reaching a final state.
347
- """
348
- in_progress = True
349
- while in_progress:
350
- run = self._openai_client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
351
- in_progress = run.status in ("in_progress", "queued")
352
- if in_progress:
353
- time.sleep(self.llm_config.get("check_every_ms", 1000) / 1000)
354
- return run
355
-
356
- def _format_assistant_message(self, message_content):
357
- """
358
- Formats the assistant's message to include annotations and citations.
359
- """
360
-
361
- annotations = message_content.annotations
362
- citations = []
363
-
364
- # Iterate over the annotations and add footnotes
365
- for index, annotation in enumerate(annotations):
366
- # Replace the text with a footnote
367
- message_content.value = message_content.value.replace(annotation.text, f" [{index}]")
368
-
369
- # Gather citations based on annotation attributes
370
- if file_citation := getattr(annotation, "file_citation", None):
371
- try:
372
- cited_file = self._openai_client.files.retrieve(file_citation.file_id)
373
- citations.append(f"[{index}] {cited_file.filename}: {file_citation.quote}")
374
- except Exception as e:
375
- logger.error(f"Error retrieving file citation: {e}")
376
- elif file_path := getattr(annotation, "file_path", None):
377
- try:
378
- cited_file = self._openai_client.files.retrieve(file_path.file_id)
379
- citations.append(f"[{index}] Click <here> to download {cited_file.filename}")
380
- except Exception as e:
381
- logger.error(f"Error retrieving file citation: {e}")
382
- # Note: File download functionality not implemented above for brevity
383
-
384
- # Add footnotes to the end of the message before displaying to user
385
- message_content.value += "\n" + "\n".join(citations)
386
- return message_content.value
387
-
388
- def can_execute_function(self, name: str) -> bool:
389
- """Whether the agent can execute the function."""
390
- return False
391
-
392
- def reset(self):
393
- """
394
- Resets the agent, clearing any existing conversation thread and unread message indices.
395
- """
396
- super().reset()
397
- for thread in self._openai_threads.values():
398
- # Delete the existing thread to start fresh in the next conversation
399
- self._openai_client.beta.threads.delete(thread.id)
400
- self._openai_threads = {}
401
- # Clear the record of unread messages
402
- self._unread_index.clear()
403
-
404
- def clear_history(self, agent: Optional[Agent] = None):
405
- """Clear the chat history of the agent.
406
-
407
- Args:
408
- agent: the agent with whom the chat history to clear. If None, clear the chat history with all agents.
409
- """
410
- super().clear_history(agent)
411
- if self._openai_threads.get(agent, None) is not None:
412
- # Delete the existing thread to start fresh in the next conversation
413
- thread = self._openai_threads[agent]
414
- logger.info("Clearing thread %s", thread.id)
415
- self._openai_client.beta.threads.delete(thread.id)
416
- self._openai_threads.pop(agent)
417
- self._unread_index[agent] = 0
418
-
419
- def pretty_print_thread(self, thread):
420
- """Pretty print the thread."""
421
- if thread is None:
422
- print("No thread to print")
423
- return
424
- # NOTE: that list may not be in order, sorting by created_at is important
425
- messages = self._openai_client.beta.threads.messages.list(
426
- thread_id=thread.id,
427
- )
428
- messages = sorted(messages.data, key=lambda x: x.created_at)
429
- print("~~~~~~~THREAD CONTENTS~~~~~~~")
430
- for message in messages:
431
- content_types = [content.type for content in message.content]
432
- print(f"[{message.created_at}]", message.role, ": [", ", ".join(content_types), "]")
433
- for content in message.content:
434
- content_type = content.type
435
- if content_type == "text":
436
- print(content.type, ": ", content.text.value)
437
- elif content_type == "image_file":
438
- print(content.type, ": ", content.image_file.file_id)
439
- else:
440
- print(content.type, ": ", content)
441
- print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
442
-
443
- @property
444
- def oai_threads(self) -> Dict[Agent, Any]:
445
- """Return the threads of the agent."""
446
- return self._openai_threads
447
-
448
- @property
449
- def assistant_id(self):
450
- """Return the assistant id"""
451
- return self._openai_assistant.id
452
-
453
- @property
454
- def openai_client(self):
455
- return self._openai_client
456
-
457
- @property
458
- def openai_assistant(self):
459
- return self._openai_assistant
460
-
461
- def get_assistant_instructions(self):
462
- """Return the assistant instructions from OAI assistant API"""
463
- return self._openai_assistant.instructions
464
-
465
- def delete_assistant(self):
466
- """Delete the assistant from OAI assistant API"""
467
- logger.warning("Permanently deleting assistant...")
468
- self._openai_client.beta.assistants.delete(self.assistant_id)
469
-
470
- def find_matching_assistant(self, candidate_assistants, instructions, tools):
471
- """
472
- Find the matching assistant from a list of candidate assistants.
473
- Filter out candidates with the same name but different instructions, and function names.
474
- """
475
- matching_assistants = []
476
-
477
- # Preprocess the required tools for faster comparison
478
- required_tool_types = set(
479
- "file_search" if tool.get("type") in ["retrieval", "file_search"] else tool.get("type") for tool in tools
480
- )
481
-
482
- required_function_names = set(
483
- tool.get("function", {}).get("name")
484
- for tool in tools
485
- if tool.get("type") not in ["code_interpreter", "retrieval", "file_search"]
486
- )
487
-
488
- for assistant in candidate_assistants:
489
- # Check if instructions are similar
490
- if instructions and instructions != getattr(assistant, "instructions", None):
491
- logger.warning(
492
- "instructions not match, skip assistant(%s): %s",
493
- assistant.id,
494
- getattr(assistant, "instructions", None),
495
- )
496
- continue
497
-
498
- # Preprocess the assistant's tools
499
- assistant_tool_types = set(
500
- "file_search" if tool.type in ["retrieval", "file_search"] else tool.type for tool in assistant.tools
501
- )
502
- assistant_function_names = set(tool.function.name for tool in assistant.tools if hasattr(tool, "function"))
503
-
504
- # Check if the tool types, function names match
505
- if required_tool_types != assistant_tool_types or required_function_names != assistant_function_names:
506
- logger.warning(
507
- "tools not match, skip assistant(%s): tools %s, functions %s",
508
- assistant.id,
509
- assistant_tool_types,
510
- assistant_function_names,
511
- )
512
- continue
513
-
514
- # Append assistant to matching list if all conditions are met
515
- matching_assistants.append(assistant)
516
-
517
- return matching_assistants
518
-
519
- def _process_assistant_config(self, llm_config, assistant_config):
520
- """
521
- Process the llm_config and assistant_config to extract the model name and assistant related configurations.
522
- """
523
-
524
- if llm_config is False:
525
- raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
526
-
527
- if llm_config is None:
528
- openai_client_cfg = {}
529
- else:
530
- openai_client_cfg = copy.deepcopy(llm_config)
531
-
532
- if assistant_config is None:
533
- openai_assistant_cfg = {}
534
- else:
535
- openai_assistant_cfg = copy.deepcopy(assistant_config)
536
-
537
- # Move the assistant related configurations to assistant_config
538
- # It's important to keep forward compatibility
539
- assistant_config_items = ["assistant_id", "tools", "file_ids", "tool_resources", "check_every_ms"]
540
- for item in assistant_config_items:
541
- if openai_client_cfg.get(item) is not None and openai_assistant_cfg.get(item) is None:
542
- openai_assistant_cfg[item] = openai_client_cfg[item]
543
- openai_client_cfg.pop(item, None)
544
-
545
- return openai_client_cfg, openai_assistant_cfg
File without changes
@@ -1,30 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- from dataclasses import dataclass
8
- from enum import Enum, auto
9
- from typing import Optional
10
-
11
-
12
class DocumentType(Enum):
    """
    Enum for supported input document types.
    """

    TEXT = auto()
    HTML = auto()
    PDF = auto()


@dataclass
class Document:
    """
    An input document to ingest into a knowledge graph.

    The original docstring ("A wrapper of graph store query results") appears
    copy-pasted from the query-result type; this class is consumed as *input*
    (e.g. by FalkorGraphQueryEngine.init_db, which reads path_or_url).
    """

    # Kind of document content (TEXT / HTML / PDF).
    doctype: DocumentType
    # Raw in-memory content, if already loaded.
    data: Optional[object] = None
    # Filesystem path or URL where the content can be loaded from.
    path_or_url: Optional[str] = ""
@@ -1,111 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- import os
6
- from dataclasses import dataclass, field
7
- from typing import List
8
-
9
- from graphrag_sdk import KnowledgeGraph, Source
10
- from graphrag_sdk.model_config import KnowledgeGraphModelConfig
11
- from graphrag_sdk.models import GenerativeModel
12
- from graphrag_sdk.models.openai import OpenAiGenerativeModel
13
- from graphrag_sdk.ontology import Ontology
14
-
15
- from .document import Document
16
- from .graph_query_engine import GraphStoreQueryResult
17
-
18
-
19
class FalkorGraphQueryEngine:
    """
    This is a wrapper for FalkorDB KnowledgeGraph.

    Build the graph from local documents with ``init_db``, then ask
    natural-language questions against it with ``query``.
    """

    def __init__(
        self,
        name: str,
        host: str = "127.0.0.1",
        port: int = 6379,
        username: str | None = None,
        password: str | None = None,
        model: GenerativeModel | None = None,
        ontology: Ontology | None = None,
    ):
        """
        Initialize a FalkorDB knowledge graph.
        Please also refer to https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/kg.py

        Args:
            name (str): Knowledge graph name.
            host (str): FalkorDB hostname.
            port (int): FalkorDB port number.
            username (str|None): FalkorDB username.
            password (str|None): FalkorDB password.
            model (GenerativeModel|None): LLM model to use for FalkorDB to build and retrieve
                from the graph. Defaults to OpenAiGenerativeModel("gpt-4o") when None.
            ontology: FalkorDB knowledge graph schema/ontology, https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/ontology.py
                If None, FalkorDB will auto generate an ontology from the input docs.
        """
        self.name = name
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        # Construct the default model lazily: the previous def-time default
        # OpenAiGenerativeModel("gpt-4o") was a single instance shared by all
        # engines and was created as an import side effect.
        self.model = model if model is not None else OpenAiGenerativeModel("gpt-4o")
        self.model_config = KnowledgeGraphModelConfig.with_model(self.model)
        self.ontology = ontology
        # Populated by init_db(); query() checks these so calling it before the
        # graph is built raises a clear ValueError instead of AttributeError.
        self.knowledge_graph = None
        self._chat_session = None

    def init_db(self, input_doc: List[Document] | None):
        """
        Build the knowledge graph with input documents.

        Only documents whose ``path_or_url`` exists on the local filesystem are
        ingested. A None or empty input (previously a TypeError on None, despite
        the annotation) leaves the engine unbuilt.
        """
        docs = input_doc or []
        sources = [Source(doc.path_or_url) for doc in docs if os.path.exists(doc.path_or_url)]

        if not sources:
            return

        # Auto generate graph ontology if not created by user.
        if self.ontology is None:
            self.ontology = Ontology.from_sources(
                sources=sources,
                model=self.model,
            )

        self.knowledge_graph = KnowledgeGraph(
            name=self.name,
            host=self.host,
            port=self.port,
            username=self.username,
            password=self.password,
            # Reuse the config computed in __init__ instead of rebuilding it.
            model_config=self.model_config,
            ontology=self.ontology,
        )

        # Establish a chat session, this will maintain the history
        self._chat_session = self.knowledge_graph.chat_session()
        self.knowledge_graph.process_sources(sources)

    def add_records(self, new_records: List) -> bool:
        raise NotImplementedError("This method is not supported by FalkorDB SDK yet.")

    def query(self, question: str, n_results: int = 1, **kwargs) -> GraphStoreQueryResult:
        """
        Query the knowledge graph with a question and optional message history.

        Args:
            question: a human input question.
            n_results: number of returned results.
            kwargs:
                messages: a list of message history.

        Returns: FalkorGraphQueryResult

        Raises:
            ValueError: if init_db() has not successfully built the graph yet.
        """
        if self.knowledge_graph is None or self._chat_session is None:
            raise ValueError("Knowledge graph is not created.")

        response = self._chat_session.send_message(question)

        # History will be considered when querying by setting the last_answer
        self._chat_session.last_answer = response["response"]

        return GraphStoreQueryResult(answer=response["response"], results=[])