ag2 0.4b1__py3-none-any.whl → 0.4.2b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic. Click here for more details.

Files changed (118) hide show
  1. ag2-0.4.2b1.dist-info/METADATA +19 -0
  2. ag2-0.4.2b1.dist-info/RECORD +6 -0
  3. ag2-0.4.2b1.dist-info/top_level.txt +1 -0
  4. ag2-0.4b1.dist-info/METADATA +0 -496
  5. ag2-0.4b1.dist-info/RECORD +0 -115
  6. ag2-0.4b1.dist-info/top_level.txt +0 -1
  7. autogen/__init__.py +0 -17
  8. autogen/_pydantic.py +0 -116
  9. autogen/agentchat/__init__.py +0 -42
  10. autogen/agentchat/agent.py +0 -142
  11. autogen/agentchat/assistant_agent.py +0 -85
  12. autogen/agentchat/chat.py +0 -306
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +0 -787
  15. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  16. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  17. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  18. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  19. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  20. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  21. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  22. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  23. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  24. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  25. autogen/agentchat/contrib/captainagent.py +0 -487
  26. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  27. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  28. autogen/agentchat/contrib/graph_rag/document.py +0 -24
  29. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -76
  30. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -50
  31. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -56
  32. autogen/agentchat/contrib/img_utils.py +0 -390
  33. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  34. autogen/agentchat/contrib/llava_agent.py +0 -176
  35. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  36. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  37. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  38. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  39. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -701
  40. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  41. autogen/agentchat/contrib/swarm_agent.py +0 -414
  42. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  43. autogen/agentchat/contrib/tool_retriever.py +0 -114
  44. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  45. autogen/agentchat/contrib/vectordb/base.py +0 -243
  46. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  47. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  48. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  49. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  50. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  51. autogen/agentchat/contrib/web_surfer.py +0 -305
  52. autogen/agentchat/conversable_agent.py +0 -2908
  53. autogen/agentchat/groupchat.py +0 -1668
  54. autogen/agentchat/user_proxy_agent.py +0 -109
  55. autogen/agentchat/utils.py +0 -207
  56. autogen/browser_utils.py +0 -291
  57. autogen/cache/__init__.py +0 -10
  58. autogen/cache/abstract_cache_base.py +0 -78
  59. autogen/cache/cache.py +0 -182
  60. autogen/cache/cache_factory.py +0 -85
  61. autogen/cache/cosmos_db_cache.py +0 -150
  62. autogen/cache/disk_cache.py +0 -109
  63. autogen/cache/in_memory_cache.py +0 -61
  64. autogen/cache/redis_cache.py +0 -128
  65. autogen/code_utils.py +0 -745
  66. autogen/coding/__init__.py +0 -22
  67. autogen/coding/base.py +0 -113
  68. autogen/coding/docker_commandline_code_executor.py +0 -262
  69. autogen/coding/factory.py +0 -45
  70. autogen/coding/func_with_reqs.py +0 -203
  71. autogen/coding/jupyter/__init__.py +0 -22
  72. autogen/coding/jupyter/base.py +0 -32
  73. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  74. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  75. autogen/coding/jupyter/jupyter_client.py +0 -224
  76. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  77. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  78. autogen/coding/local_commandline_code_executor.py +0 -410
  79. autogen/coding/markdown_code_extractor.py +0 -44
  80. autogen/coding/utils.py +0 -57
  81. autogen/exception_utils.py +0 -46
  82. autogen/extensions/__init__.py +0 -0
  83. autogen/formatting_utils.py +0 -76
  84. autogen/function_utils.py +0 -362
  85. autogen/graph_utils.py +0 -148
  86. autogen/io/__init__.py +0 -15
  87. autogen/io/base.py +0 -105
  88. autogen/io/console.py +0 -43
  89. autogen/io/websockets.py +0 -213
  90. autogen/logger/__init__.py +0 -11
  91. autogen/logger/base_logger.py +0 -140
  92. autogen/logger/file_logger.py +0 -287
  93. autogen/logger/logger_factory.py +0 -29
  94. autogen/logger/logger_utils.py +0 -42
  95. autogen/logger/sqlite_logger.py +0 -459
  96. autogen/math_utils.py +0 -356
  97. autogen/oai/__init__.py +0 -33
  98. autogen/oai/anthropic.py +0 -428
  99. autogen/oai/bedrock.py +0 -600
  100. autogen/oai/cerebras.py +0 -264
  101. autogen/oai/client.py +0 -1148
  102. autogen/oai/client_utils.py +0 -167
  103. autogen/oai/cohere.py +0 -453
  104. autogen/oai/completion.py +0 -1216
  105. autogen/oai/gemini.py +0 -469
  106. autogen/oai/groq.py +0 -281
  107. autogen/oai/mistral.py +0 -279
  108. autogen/oai/ollama.py +0 -576
  109. autogen/oai/openai_utils.py +0 -810
  110. autogen/oai/together.py +0 -343
  111. autogen/retrieve_utils.py +0 -487
  112. autogen/runtime_logging.py +0 -163
  113. autogen/token_count_utils.py +0 -257
  114. autogen/types.py +0 -20
  115. autogen/version.py +0 -7
  116. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
  117. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
  118. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
@@ -1,545 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- import copy
8
- import json
9
- import logging
10
- import time
11
- from collections import defaultdict
12
- from typing import Any, Dict, List, Optional, Tuple, Union
13
-
14
- from autogen import OpenAIWrapper
15
- from autogen.agentchat.agent import Agent
16
- from autogen.agentchat.assistant_agent import AssistantAgent, ConversableAgent
17
- from autogen.oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant
18
- from autogen.runtime_logging import log_new_agent, logging_enabled
19
-
20
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
21
-
22
-
23
class GPTAssistantAgent(ConversableAgent):
    """
    An experimental AutoGen agent class that leverages the OpenAI Assistant API for conversational capabilities.
    This agent is unique in its reliance on the OpenAI Assistant for state management (threads, runs),
    differing from other agents like ConversableAgent, which keep state locally.
    """

    # Fallback model used when neither config_list nor llm_config supplies one.
    DEFAULT_MODEL_NAME = "gpt-4-0125-preview"
30
-
31
    def __init__(
        self,
        name="GPT Assistant",
        instructions: Optional[str] = None,
        llm_config: Optional[Union[Dict, bool]] = None,
        assistant_config: Optional[Dict] = None,
        overwrite_instructions: bool = False,
        overwrite_tools: bool = False,
        **kwargs,
    ):
        """
        Args:
            name (str): name of the agent. It will be used to find the existing assistant by name. Please remember to delete an old assistant with the same name if you intend to create a new assistant with the same name.
            instructions (str): instructions for the OpenAI assistant configuration.
                When instructions is not None, the system message of the agent will be
                set to the provided instructions and used in the assistant run, irrespective
                of the overwrite_instructions flag. But when instructions is None,
                and the assistant does not exist, the system message will be set to
                AssistantAgent.DEFAULT_SYSTEM_MESSAGE. If the assistant exists, the
                system message will be set to the existing assistant instructions.
            llm_config (dict or False): llm inference configuration.
                - model: Model to use for the assistant (gpt-4-1106-preview, gpt-3.5-turbo-1106).
            assistant_config (dict): OpenAI-assistant-specific configuration.
                - assistant_id: ID of the assistant to use. If None, a new assistant will be created.
                - check_every_ms: check thread run status interval
                - tools: Give Assistants access to OpenAI-hosted tools like Code Interpreter and Knowledge Retrieval,
                    or build your own tools using Function calling. ref https://platform.openai.com/docs/assistants/tools
                - file_ids: (Deprecated) files used by retrieval in run. It is Deprecated, use tool_resources instead. https://platform.openai.com/docs/assistants/migration/what-has-changed.
                - tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool.
            overwrite_instructions (bool): whether to overwrite the instructions of an existing assistant. This parameter is in effect only when assistant_id is specified in llm_config or assistant_config.
            overwrite_tools (bool): whether to overwrite the tools of an existing assistant. This parameter is in effect only when assistant_id is specified in llm_config or assistant_config.
            kwargs (dict): Additional configuration options for the agent.
                - verbose (bool): If set to True, enables more detailed output from the assistant thread.
                - Other kwargs: Except verbose, others are passed directly to ConversableAgent.
        """

        self._verbose = kwargs.pop("verbose", False)
        # Split the caller-supplied configs into client settings vs. assistant settings.
        openai_client_cfg, openai_assistant_cfg = self._process_assistant_config(llm_config, assistant_config)

        super().__init__(
            name=name, system_message=instructions, human_input_mode="NEVER", llm_config=openai_client_cfg, **kwargs
        )
        if logging_enabled():
            log_new_agent(self, locals())

        # GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list()
        # See: https://github.com/microsoft/autogen/pull/1721
        model_name = self.DEFAULT_MODEL_NAME
        if openai_client_cfg.get("config_list") is not None and len(openai_client_cfg["config_list"]) > 0:
            model_name = openai_client_cfg["config_list"][0].pop("model", self.DEFAULT_MODEL_NAME)
        else:
            model_name = openai_client_cfg.pop("model", self.DEFAULT_MODEL_NAME)

        logger.warning("OpenAI client config of GPTAssistantAgent(%s) - model: %s", name, model_name)

        oai_wrapper = OpenAIWrapper(**openai_client_cfg)
        if len(oai_wrapper._clients) > 1:
            logger.warning("GPT Assistant only supports one OpenAI client. Using the first client in the list.")

        # Keep a direct handle on the raw OpenAI SDK client for beta assistant/thread calls.
        self._openai_client = oai_wrapper._clients[0]._oai_client
        openai_assistant_id = openai_assistant_cfg.get("assistant_id", None)
        if openai_assistant_id is None:
            # try to find assistant by name first
            candidate_assistants = retrieve_assistants_by_name(self._openai_client, name)
            if len(candidate_assistants) > 0:
                # Filter out candidates with the same name but different instructions, file IDs, and function names.
                candidate_assistants = self.find_matching_assistant(
                    candidate_assistants,
                    instructions,
                    openai_assistant_cfg.get("tools", []),
                )

            if len(candidate_assistants) == 0:
                logger.warning("No matching assistant found, creating a new assistant")
                # create a new assistant
                if instructions is None:
                    logger.warning(
                        "No instructions were provided for new assistant. Using default instructions from AssistantAgent.DEFAULT_SYSTEM_MESSAGE."
                    )
                    instructions = AssistantAgent.DEFAULT_SYSTEM_MESSAGE
                self._openai_assistant = create_gpt_assistant(
                    self._openai_client,
                    name=name,
                    instructions=instructions,
                    model=model_name,
                    assistant_config=openai_assistant_cfg,
                )
            else:
                logger.warning(
                    "Matching assistant found, using the first matching assistant: %s",
                    candidate_assistants[0].__dict__,
                )
                self._openai_assistant = candidate_assistants[0]
        else:
            # retrieve an existing assistant
            self._openai_assistant = self._openai_client.beta.assistants.retrieve(openai_assistant_id)
            # if no instructions are provided, set the instructions to the existing instructions
            if instructions is None:
                logger.warning(
                    "No instructions were provided for given assistant. Using existing instructions from assistant API."
                )
                instructions = self.get_assistant_instructions()
            elif overwrite_instructions is True:
                logger.warning(
                    "overwrite_instructions is True. Provided instructions will be used and will modify the assistant in the API"
                )
                self._openai_assistant = update_gpt_assistant(
                    self._openai_client,
                    assistant_id=openai_assistant_id,
                    assistant_config={
                        "instructions": instructions,
                    },
                )
            else:
                logger.warning(
                    "overwrite_instructions is False. Provided instructions will be used without permanently modifying the assistant in the API."
                )

            # Check if tools are specified in assistant_config
            specified_tools = openai_assistant_cfg.get("tools", None)

            if specified_tools is None:
                # Check if the current assistant has tools defined
                if self._openai_assistant.tools:
                    logger.warning(
                        "No tools were provided for given assistant. Using existing tools from assistant API."
                    )
                else:
                    logger.info(
                        "No tools were provided for the assistant, and the assistant currently has no tools set."
                    )
            elif overwrite_tools is True:
                # Tools are specified and overwrite_tools is True; update the assistant's tools
                logger.warning(
                    "overwrite_tools is True. Provided tools will be used and will modify the assistant in the API"
                )
                self._openai_assistant = update_gpt_assistant(
                    self._openai_client,
                    assistant_id=openai_assistant_id,
                    assistant_config={
                        "tools": specified_tools,
                        "tool_resources": openai_assistant_cfg.get("tool_resources", None),
                    },
                )
            else:
                # Tools are specified but overwrite_tools is False; do not update the assistant's tools
                logger.warning("overwrite_tools is False. Using existing tools from assistant API.")

        # Keep the local system message in sync with the (possibly updated) remote assistant.
        self.update_system_message(self._openai_assistant.instructions)
        # lazily create threads
        self._openai_threads = {}
        # Per-sender index of the first message not yet forwarded to the assistant thread.
        self._unread_index = defaultdict(int)
        self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant, position=2)
184
-
185
    def _invoke_assistant(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
        config: Optional[Any] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        """
        Invokes the OpenAI assistant to generate a reply based on the given messages.

        Only messages past the per-sender unread index are forwarded to the
        assistant thread; earlier ones are assumed to already be on the thread.

        Args:
            messages: A list of messages in the conversation history with the sender.
            sender: The agent instance that sent the message.
            config: Optional configuration for message processing (unused here).

        Returns:
            A tuple containing a boolean indicating success and the assistant's reply.
        """

        if messages is None:
            messages = self._oai_messages[sender]
        unread_index = self._unread_index[sender] or 0
        pending_messages = messages[unread_index:]

        # Check and initiate a new thread if necessary
        if self._openai_threads.get(sender, None) is None:
            self._openai_threads[sender] = self._openai_client.beta.threads.create(
                messages=[],
            )
        assistant_thread = self._openai_threads[sender]
        # Process each unread message
        for message in pending_messages:
            # Skip messages with empty content.
            if message["content"].strip() == "":
                continue
            # Convert message roles to 'user' or 'assistant', by calling _map_role_for_api, to comply with OpenAI API spec
            api_role = self._map_role_for_api(message["role"])
            self._openai_client.beta.threads.messages.create(
                thread_id=assistant_thread.id,
                content=message["content"],
                role=api_role,
            )

        # Create a new run to get responses from the assistant
        run = self._openai_client.beta.threads.runs.create(
            thread_id=assistant_thread.id,
            assistant_id=self._openai_assistant.id,
            # pass the latest system message as instructions
            instructions=self.system_message,
        )

        run_response_messages = self._get_run_response(assistant_thread, run)
        assert len(run_response_messages) > 0, "No response from the assistant."

        # Collapse all run messages into a single reply; role taken from the last message.
        response = {
            "role": run_response_messages[-1]["role"],
            "content": "",
        }
        for message in run_response_messages:
            # just logging or do something with the intermediate messages?
            # if current response is not empty and there is more, append new lines
            if len(response["content"]) > 0:
                response["content"] += "\n\n"
            response["content"] += message["content"]

        # NOTE(review): the +1 appears to pre-count the reply that the caller
        # appends after this returns -- confirm against ConversableAgent's flow.
        self._unread_index[sender] = len(self._oai_messages[sender]) + 1
        return True, response
250
-
251
- def _map_role_for_api(self, role: str) -> str:
252
- """
253
- Maps internal message roles to the roles expected by the OpenAI Assistant API.
254
-
255
- Args:
256
- role (str): The role from the internal message.
257
-
258
- Returns:
259
- str: The mapped role suitable for the API.
260
- """
261
- if role in ["function", "tool"]:
262
- return "assistant"
263
- elif role == "system":
264
- return "system"
265
- elif role == "user":
266
- return "user"
267
- elif role == "assistant":
268
- return "assistant"
269
- else:
270
- # Default to 'assistant' for any other roles not recognized by the API
271
- return "assistant"
272
-
273
    def _get_run_response(self, thread, run):
        """
        Waits for and processes the response of a run from the OpenAI assistant.

        Loops until the run completes, executing any requested tool calls and
        submitting their outputs back to the run along the way.

        Args:
            thread: The thread object the run belongs to.
            run: The run object initiated with the OpenAI assistant.

        Returns:
            A list of message dicts ("role"/"content") produced by this run.

        Raises:
            ValueError: If the run ends in any status other than "completed"
                or "requires_action" (e.g. "failed", "expired").
        """
        while True:
            run = self._wait_for_run(run.id, thread.id)
            if run.status == "completed":
                # Fetch the thread in ascending order and keep only messages
                # produced by this run.
                response_messages = self._openai_client.beta.threads.messages.list(thread.id, order="asc")

                new_messages = []
                for msg in response_messages:
                    if msg.run_id == run.id:
                        for content in msg.content:
                            if content.type == "text":
                                new_messages.append(
                                    {"role": msg.role, "content": self._format_assistant_message(content.text)}
                                )
                            elif content.type == "image_file":
                                # Images are not downloaded; only the file id is surfaced.
                                new_messages.append(
                                    {
                                        "role": msg.role,
                                        "content": f"Received file id={content.image_file.file_id}",
                                    }
                                )
                return new_messages
            elif run.status == "requires_action":
                # The assistant requested tool calls: execute each one locally
                # and send the outputs back so the run can continue.
                actions = []
                for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                    function = tool_call.function
                    is_exec_success, tool_response = self.execute_function(function.dict(), self._verbose)
                    tool_response["metadata"] = {
                        "tool_call_id": tool_call.id,
                        "run_id": run.id,
                        "thread_id": thread.id,
                    }

                    logger.info(
                        "Intermediate executing(%s, Success: %s) : %s",
                        tool_response["name"],
                        is_exec_success,
                        tool_response["content"],
                    )
                    actions.append(tool_response)

                submit_tool_outputs = {
                    "tool_outputs": [
                        {"output": action["content"], "tool_call_id": action["metadata"]["tool_call_id"]}
                        for action in actions
                    ],
                    "run_id": run.id,
                    "thread_id": thread.id,
                }

                run = self._openai_client.beta.threads.runs.submit_tool_outputs(**submit_tool_outputs)
            else:
                # Any other terminal status (failed, cancelled, expired, ...) is an error.
                run_info = json.dumps(run.dict(), indent=2)
                raise ValueError(f"Unexpected run status: {run.status}. Full run info:\n\n{run_info})")
336
-
337
- def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
338
- """
339
- Waits for a run to complete or reach a final state.
340
-
341
- Args:
342
- run_id: The ID of the run.
343
- thread_id: The ID of the thread associated with the run.
344
-
345
- Returns:
346
- The updated run object after completion or reaching a final state.
347
- """
348
- in_progress = True
349
- while in_progress:
350
- run = self._openai_client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
351
- in_progress = run.status in ("in_progress", "queued")
352
- if in_progress:
353
- time.sleep(self.llm_config.get("check_every_ms", 1000) / 1000)
354
- return run
355
-
356
- def _format_assistant_message(self, message_content):
357
- """
358
- Formats the assistant's message to include annotations and citations.
359
- """
360
-
361
- annotations = message_content.annotations
362
- citations = []
363
-
364
- # Iterate over the annotations and add footnotes
365
- for index, annotation in enumerate(annotations):
366
- # Replace the text with a footnote
367
- message_content.value = message_content.value.replace(annotation.text, f" [{index}]")
368
-
369
- # Gather citations based on annotation attributes
370
- if file_citation := getattr(annotation, "file_citation", None):
371
- try:
372
- cited_file = self._openai_client.files.retrieve(file_citation.file_id)
373
- citations.append(f"[{index}] {cited_file.filename}: {file_citation.quote}")
374
- except Exception as e:
375
- logger.error(f"Error retrieving file citation: {e}")
376
- elif file_path := getattr(annotation, "file_path", None):
377
- try:
378
- cited_file = self._openai_client.files.retrieve(file_path.file_id)
379
- citations.append(f"[{index}] Click <here> to download {cited_file.filename}")
380
- except Exception as e:
381
- logger.error(f"Error retrieving file citation: {e}")
382
- # Note: File download functionality not implemented above for brevity
383
-
384
- # Add footnotes to the end of the message before displaying to user
385
- message_content.value += "\n" + "\n".join(citations)
386
- return message_content.value
387
-
388
    def can_execute_function(self, name: str) -> bool:
        """Whether the agent can execute the function.

        Always False: tool calls are executed inside the assistant run's
        "requires_action" handling (_get_run_response), not by this agent
        through the normal function-execution reply path.
        """
        return False
391
-
392
- def reset(self):
393
- """
394
- Resets the agent, clearing any existing conversation thread and unread message indices.
395
- """
396
- super().reset()
397
- for thread in self._openai_threads.values():
398
- # Delete the existing thread to start fresh in the next conversation
399
- self._openai_client.beta.threads.delete(thread.id)
400
- self._openai_threads = {}
401
- # Clear the record of unread messages
402
- self._unread_index.clear()
403
-
404
- def clear_history(self, agent: Optional[Agent] = None):
405
- """Clear the chat history of the agent.
406
-
407
- Args:
408
- agent: the agent with whom the chat history to clear. If None, clear the chat history with all agents.
409
- """
410
- super().clear_history(agent)
411
- if self._openai_threads.get(agent, None) is not None:
412
- # Delete the existing thread to start fresh in the next conversation
413
- thread = self._openai_threads[agent]
414
- logger.info("Clearing thread %s", thread.id)
415
- self._openai_client.beta.threads.delete(thread.id)
416
- self._openai_threads.pop(agent)
417
- self._unread_index[agent] = 0
418
-
419
- def pretty_print_thread(self, thread):
420
- """Pretty print the thread."""
421
- if thread is None:
422
- print("No thread to print")
423
- return
424
- # NOTE: that list may not be in order, sorting by created_at is important
425
- messages = self._openai_client.beta.threads.messages.list(
426
- thread_id=thread.id,
427
- )
428
- messages = sorted(messages.data, key=lambda x: x.created_at)
429
- print("~~~~~~~THREAD CONTENTS~~~~~~~")
430
- for message in messages:
431
- content_types = [content.type for content in message.content]
432
- print(f"[{message.created_at}]", message.role, ": [", ", ".join(content_types), "]")
433
- for content in message.content:
434
- content_type = content.type
435
- if content_type == "text":
436
- print(content.type, ": ", content.text.value)
437
- elif content_type == "image_file":
438
- print(content.type, ": ", content.image_file.file_id)
439
- else:
440
- print(content.type, ": ", content)
441
- print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
442
-
443
    @property
    def oai_threads(self) -> Dict[Agent, Any]:
        """Return the threads of the agent, keyed by the peer agent."""
        return self._openai_threads

    @property
    def assistant_id(self):
        """Return the assistant id"""
        return self._openai_assistant.id

    @property
    def openai_client(self):
        """Return the raw OpenAI client used for assistant/thread API calls."""
        return self._openai_client

    @property
    def openai_assistant(self):
        """Return the OpenAI assistant object backing this agent."""
        return self._openai_assistant
460
-
461
    def get_assistant_instructions(self):
        """Return the assistant instructions from OAI assistant API"""
        return self._openai_assistant.instructions

    def delete_assistant(self):
        """Delete the assistant from OAI assistant API.

        Irreversible: removes the remote assistant object, not just this agent.
        """
        logger.warning("Permanently deleting assistant...")
        self._openai_client.beta.assistants.delete(self.assistant_id)
469
-
470
- def find_matching_assistant(self, candidate_assistants, instructions, tools):
471
- """
472
- Find the matching assistant from a list of candidate assistants.
473
- Filter out candidates with the same name but different instructions, and function names.
474
- """
475
- matching_assistants = []
476
-
477
- # Preprocess the required tools for faster comparison
478
- required_tool_types = set(
479
- "file_search" if tool.get("type") in ["retrieval", "file_search"] else tool.get("type") for tool in tools
480
- )
481
-
482
- required_function_names = set(
483
- tool.get("function", {}).get("name")
484
- for tool in tools
485
- if tool.get("type") not in ["code_interpreter", "retrieval", "file_search"]
486
- )
487
-
488
- for assistant in candidate_assistants:
489
- # Check if instructions are similar
490
- if instructions and instructions != getattr(assistant, "instructions", None):
491
- logger.warning(
492
- "instructions not match, skip assistant(%s): %s",
493
- assistant.id,
494
- getattr(assistant, "instructions", None),
495
- )
496
- continue
497
-
498
- # Preprocess the assistant's tools
499
- assistant_tool_types = set(
500
- "file_search" if tool.type in ["retrieval", "file_search"] else tool.type for tool in assistant.tools
501
- )
502
- assistant_function_names = set(tool.function.name for tool in assistant.tools if hasattr(tool, "function"))
503
-
504
- # Check if the tool types, function names match
505
- if required_tool_types != assistant_tool_types or required_function_names != assistant_function_names:
506
- logger.warning(
507
- "tools not match, skip assistant(%s): tools %s, functions %s",
508
- assistant.id,
509
- assistant_tool_types,
510
- assistant_function_names,
511
- )
512
- continue
513
-
514
- # Append assistant to matching list if all conditions are met
515
- matching_assistants.append(assistant)
516
-
517
- return matching_assistants
518
-
519
- def _process_assistant_config(self, llm_config, assistant_config):
520
- """
521
- Process the llm_config and assistant_config to extract the model name and assistant related configurations.
522
- """
523
-
524
- if llm_config is False:
525
- raise ValueError("llm_config=False is not supported for GPTAssistantAgent.")
526
-
527
- if llm_config is None:
528
- openai_client_cfg = {}
529
- else:
530
- openai_client_cfg = copy.deepcopy(llm_config)
531
-
532
- if assistant_config is None:
533
- openai_assistant_cfg = {}
534
- else:
535
- openai_assistant_cfg = copy.deepcopy(assistant_config)
536
-
537
- # Move the assistant related configurations to assistant_config
538
- # It's important to keep forward compatibility
539
- assistant_config_items = ["assistant_id", "tools", "file_ids", "tool_resources", "check_every_ms"]
540
- for item in assistant_config_items:
541
- if openai_client_cfg.get(item) is not None and openai_assistant_cfg.get(item) is None:
542
- openai_assistant_cfg[item] = openai_client_cfg[item]
543
- openai_client_cfg.pop(item, None)
544
-
545
- return openai_client_cfg, openai_assistant_cfg
File without changes
@@ -1,24 +0,0 @@
1
- from dataclasses import dataclass
2
- from enum import Enum, auto
3
- from typing import Optional
4
-
5
-
6
class DocumentType(Enum):
    """
    Enum for supporting document type.
    """

    # Auto-numbered members; only member identity matters, not the values.
    TEXT = auto()
    HTML = auto()
    PDF = auto()
14
-
15
-
16
@dataclass
class Document:
    """
    A wrapper of graph store query results.
    """

    # The format of the underlying document.
    doctype: DocumentType
    # Raw document content; None when only a path/URL reference is held.
    data: Optional[object] = None
    # Filesystem path or URL of the document. NOTE(review): default is ""
    # rather than None despite the Optional annotation -- confirm callers
    # rely on the empty string before changing it.
    path_or_url: Optional[str] = ""
@@ -1,76 +0,0 @@
1
import os
from dataclasses import dataclass, field
from typing import List

from graphrag_sdk import KnowledgeGraph, Source
from graphrag_sdk.schema import Schema

from .document import Document
from .graph_query_engine import GraphStoreQueryResult
10
-
11
-
12
@dataclass
class FalkorGraphQueryResult(GraphStoreQueryResult):
    """
    Falkor DB query result: extends GraphStoreQueryResult with the chat
    history returned by KnowledgeGraph.ask().
    """

    # Without the @dataclass decorator this field() declaration is never
    # processed, leaving `messages` as a dataclasses.Field object and making
    # the inherited __init__ reject the `messages=` keyword that query() uses.
    messages: list = field(default_factory=list)
14
-
15
-
16
class FalkorGraphQueryEngine:
    """
    This is a wrapper for Falkor DB KnowledgeGraph.
    """

    def __init__(
        self,
        name: str,
        host: str = "127.0.0.1",
        port: int = 6379,
        username: str | None = None,
        password: str | None = None,
        model: str = "gpt-4-1106-preview",
        schema: Schema | None = None,
    ):
        """
        Initialize a Falkor DB knowledge graph.
        Please also refer to https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/kg.py

        Args:
            name (str): Knowledge graph name.
            host (str): FalkorDB hostname.
            port (int): FalkorDB port number.
            username (str|None): FalkorDB username.
            password (str|None): FalkorDB password.
            model (str): OpenAI model to use for Falkor DB to build and retrieve from the graph.
            schema: Falkor DB knowledge graph schema (ontology), https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/schema/schema.py
                If None, Falkor DB will auto generate a schema from the input docs.
        """
        self.knowledge_graph = KnowledgeGraph(name, host, port, username, password, model, schema)

    def init_db(self, input_doc: List[Document] | None):
        """
        Build the knowledge graph with input documents.

        Documents whose path_or_url does not exist on the local filesystem are
        skipped. A None or empty input is a no-op (the annotation allows None,
        but iterating it unconditionally raised TypeError before this guard).
        """
        if not input_doc:
            return

        sources = []
        for doc in input_doc:
            if os.path.exists(doc.path_or_url):
                sources.append(Source(doc.path_or_url))

        if sources:
            self.knowledge_graph.process_sources(sources)

    def add_records(self, new_records: List) -> bool:
        """Not supported: the Falkor DB SDK exposes no incremental-record API yet."""
        raise NotImplementedError("This method is not supported by Falkor DB SDK yet.")

    def query(self, question: str, n_results: int = 1, **kwargs) -> FalkorGraphQueryResult:
        """
        Query the knowledge graph with a question and optional message history.

        Args:
            question: a human input question.
            n_results: number of returned results. NOTE(review): currently
                unused -- KnowledgeGraph.ask() takes no result count; kept for
                interface compatibility with other query engines.
            kwargs:
                messages: a list of message history.

        Returns: FalkorGraphQueryResult
        """
        messages = kwargs.pop("messages", [])
        answer, messages = self.knowledge_graph.ask(question, messages)
        return FalkorGraphQueryResult(answer=answer, results=[], messages=messages)