ag2: ag2-0.4b1-py3-none-any.whl → ag2-0.4.2b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic. Click here for more details.

Files changed (118)
  1. ag2-0.4.2b1.dist-info/METADATA +19 -0
  2. ag2-0.4.2b1.dist-info/RECORD +6 -0
  3. ag2-0.4.2b1.dist-info/top_level.txt +1 -0
  4. ag2-0.4b1.dist-info/METADATA +0 -496
  5. ag2-0.4b1.dist-info/RECORD +0 -115
  6. ag2-0.4b1.dist-info/top_level.txt +0 -1
  7. autogen/__init__.py +0 -17
  8. autogen/_pydantic.py +0 -116
  9. autogen/agentchat/__init__.py +0 -42
  10. autogen/agentchat/agent.py +0 -142
  11. autogen/agentchat/assistant_agent.py +0 -85
  12. autogen/agentchat/chat.py +0 -306
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +0 -787
  15. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  16. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  17. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  18. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  19. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  20. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  21. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  22. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  23. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  24. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  25. autogen/agentchat/contrib/captainagent.py +0 -487
  26. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  27. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  28. autogen/agentchat/contrib/graph_rag/document.py +0 -24
  29. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -76
  30. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -50
  31. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -56
  32. autogen/agentchat/contrib/img_utils.py +0 -390
  33. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  34. autogen/agentchat/contrib/llava_agent.py +0 -176
  35. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  36. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  37. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  38. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  39. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -701
  40. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  41. autogen/agentchat/contrib/swarm_agent.py +0 -414
  42. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  43. autogen/agentchat/contrib/tool_retriever.py +0 -114
  44. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  45. autogen/agentchat/contrib/vectordb/base.py +0 -243
  46. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  47. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  48. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  49. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  50. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  51. autogen/agentchat/contrib/web_surfer.py +0 -305
  52. autogen/agentchat/conversable_agent.py +0 -2908
  53. autogen/agentchat/groupchat.py +0 -1668
  54. autogen/agentchat/user_proxy_agent.py +0 -109
  55. autogen/agentchat/utils.py +0 -207
  56. autogen/browser_utils.py +0 -291
  57. autogen/cache/__init__.py +0 -10
  58. autogen/cache/abstract_cache_base.py +0 -78
  59. autogen/cache/cache.py +0 -182
  60. autogen/cache/cache_factory.py +0 -85
  61. autogen/cache/cosmos_db_cache.py +0 -150
  62. autogen/cache/disk_cache.py +0 -109
  63. autogen/cache/in_memory_cache.py +0 -61
  64. autogen/cache/redis_cache.py +0 -128
  65. autogen/code_utils.py +0 -745
  66. autogen/coding/__init__.py +0 -22
  67. autogen/coding/base.py +0 -113
  68. autogen/coding/docker_commandline_code_executor.py +0 -262
  69. autogen/coding/factory.py +0 -45
  70. autogen/coding/func_with_reqs.py +0 -203
  71. autogen/coding/jupyter/__init__.py +0 -22
  72. autogen/coding/jupyter/base.py +0 -32
  73. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  74. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  75. autogen/coding/jupyter/jupyter_client.py +0 -224
  76. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  77. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  78. autogen/coding/local_commandline_code_executor.py +0 -410
  79. autogen/coding/markdown_code_extractor.py +0 -44
  80. autogen/coding/utils.py +0 -57
  81. autogen/exception_utils.py +0 -46
  82. autogen/extensions/__init__.py +0 -0
  83. autogen/formatting_utils.py +0 -76
  84. autogen/function_utils.py +0 -362
  85. autogen/graph_utils.py +0 -148
  86. autogen/io/__init__.py +0 -15
  87. autogen/io/base.py +0 -105
  88. autogen/io/console.py +0 -43
  89. autogen/io/websockets.py +0 -213
  90. autogen/logger/__init__.py +0 -11
  91. autogen/logger/base_logger.py +0 -140
  92. autogen/logger/file_logger.py +0 -287
  93. autogen/logger/logger_factory.py +0 -29
  94. autogen/logger/logger_utils.py +0 -42
  95. autogen/logger/sqlite_logger.py +0 -459
  96. autogen/math_utils.py +0 -356
  97. autogen/oai/__init__.py +0 -33
  98. autogen/oai/anthropic.py +0 -428
  99. autogen/oai/bedrock.py +0 -600
  100. autogen/oai/cerebras.py +0 -264
  101. autogen/oai/client.py +0 -1148
  102. autogen/oai/client_utils.py +0 -167
  103. autogen/oai/cohere.py +0 -453
  104. autogen/oai/completion.py +0 -1216
  105. autogen/oai/gemini.py +0 -469
  106. autogen/oai/groq.py +0 -281
  107. autogen/oai/mistral.py +0 -279
  108. autogen/oai/ollama.py +0 -576
  109. autogen/oai/openai_utils.py +0 -810
  110. autogen/oai/together.py +0 -343
  111. autogen/retrieve_utils.py +0 -487
  112. autogen/runtime_logging.py +0 -163
  113. autogen/token_count_utils.py +0 -257
  114. autogen/types.py +0 -20
  115. autogen/version.py +0 -7
  116. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
  117. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
  118. {ag2-0.4b1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
autogen/token_count_utils.py DELETED
@@ -1,257 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- import json
8
- import logging
9
- import re
10
- from typing import Dict, List, Union
11
-
12
- import tiktoken
13
-
14
- try:
15
- from autogen.agentchat.contrib.img_utils import num_tokens_from_gpt_image
16
-
17
- img_util_imported = True
18
- except ImportError:
19
-
20
- def num_tokens_from_gpt_image(*args, **kwargs):
21
- return 0
22
-
23
- img_util_imported = False
24
-
25
-
26
- logger = logging.getLogger(__name__)
27
- logger.img_dependency_warned = False # member variable to track if the warning has been logged
28
-
29
-
30
- def get_max_token_limit(model: str = "gpt-3.5-turbo-0613") -> int:
31
- # Handle common azure model names/aliases
32
- model = re.sub(r"^gpt\-?35", "gpt-3.5", model)
33
- model = re.sub(r"^gpt4", "gpt-4", model)
34
-
35
- max_token_limit = {
36
- "gpt-3.5-turbo": 16385,
37
- "gpt-3.5-turbo-0125": 16385,
38
- "gpt-3.5-turbo-0301": 4096,
39
- "gpt-3.5-turbo-0613": 4096,
40
- "gpt-3.5-turbo-instruct": 4096,
41
- "gpt-3.5-turbo-16k": 16385,
42
- "gpt-3.5-turbo-16k-0613": 16385,
43
- "gpt-3.5-turbo-1106": 16385,
44
- "gpt-4": 8192,
45
- "gpt-4-turbo": 128000,
46
- "gpt-4-turbo-2024-04-09": 128000,
47
- "gpt-4-32k": 32768,
48
- "gpt-4-32k-0314": 32768, # deprecate in Sep
49
- "gpt-4-0314": 8192, # deprecate in Sep
50
- "gpt-4-0613": 8192,
51
- "gpt-4-32k-0613": 32768,
52
- "gpt-4-1106-preview": 128000,
53
- "gpt-4-0125-preview": 128000,
54
- "gpt-4-turbo-preview": 128000,
55
- "gpt-4-vision-preview": 128000,
56
- "gpt-4o": 128000,
57
- "gpt-4o-2024-05-13": 128000,
58
- "gpt-4o-2024-08-06": 128000,
59
- "gpt-4o-mini": 128000,
60
- "gpt-4o-mini-2024-07-18": 128000,
61
- }
62
- return max_token_limit[model]
63
-
64
-
65
- def percentile_used(input, model="gpt-3.5-turbo-0613"):
66
- return count_token(input) / get_max_token_limit(model)
67
-
68
-
69
- def token_left(input: Union[str, List, Dict], model="gpt-3.5-turbo-0613") -> int:
70
- """Count number of tokens left for an OpenAI model.
71
-
72
- Args:
73
- input: (str, list, dict): Input to the model.
74
- model: (str): Model name.
75
-
76
- Returns:
77
- int: Number of tokens left that the model can use for completion.
78
- """
79
- return get_max_token_limit(model) - count_token(input, model=model)
80
-
81
-
82
- def count_token(input: Union[str, List, Dict], model: str = "gpt-3.5-turbo-0613") -> int:
83
- """Count number of tokens used by an OpenAI model.
84
- Args:
85
- input: (str, list, dict): Input to the model.
86
- model: (str): Model name.
87
-
88
- Returns:
89
- int: Number of tokens from the input.
90
- """
91
- if isinstance(input, str):
92
- return _num_token_from_text(input, model=model)
93
- elif isinstance(input, list) or isinstance(input, dict):
94
- return _num_token_from_messages(input, model=model)
95
- else:
96
- raise ValueError(f"input must be str, list or dict, but we got {type(input)}")
97
-
98
-
99
- def _num_token_from_text(text: str, model: str = "gpt-3.5-turbo-0613"):
100
- """Return the number of tokens used by a string."""
101
- try:
102
- encoding = tiktoken.encoding_for_model(model)
103
- except KeyError:
104
- logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
105
- encoding = tiktoken.get_encoding("cl100k_base")
106
- return len(encoding.encode(text))
107
-
108
-
109
- def _num_token_from_messages(messages: Union[List, Dict], model="gpt-3.5-turbo-0613"):
110
- """Return the number of tokens used by a list of messages.
111
-
112
- retrieved from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb/
113
- """
114
- if isinstance(messages, dict):
115
- messages = [messages]
116
-
117
- try:
118
- encoding = tiktoken.encoding_for_model(model)
119
- except KeyError:
120
- logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
121
- encoding = tiktoken.get_encoding("cl100k_base")
122
- if model in {
123
- "gpt-3.5-turbo-0613",
124
- "gpt-3.5-turbo-16k-0613",
125
- "gpt-4-0314",
126
- "gpt-4-32k-0314",
127
- "gpt-4-0613",
128
- "gpt-4-32k-0613",
129
- "gpt-4-turbo-preview",
130
- "gpt-4-vision-preview",
131
- "gpt-4o",
132
- "gpt-4o-2024-05-13",
133
- "gpt-4o-2024-08-06",
134
- "gpt-4o-mini",
135
- "gpt-4o-mini-2024-07-18",
136
- }:
137
- tokens_per_message = 3
138
- tokens_per_name = 1
139
- elif model == "gpt-3.5-turbo-0301":
140
- tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
141
- tokens_per_name = -1 # if there's a name, the role is omitted
142
- elif "gpt-3.5-turbo" in model:
143
- logger.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
144
- return _num_token_from_messages(messages, model="gpt-3.5-turbo-0613")
145
- elif "gpt-4" in model:
146
- logger.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
147
- return _num_token_from_messages(messages, model="gpt-4-0613")
148
- elif "gemini" in model:
149
- logger.info("Gemini is not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
150
- return _num_token_from_messages(messages, model="gpt-4-0613")
151
- elif "claude" in model:
152
- logger.info("Claude is not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
153
- return _num_token_from_messages(messages, model="gpt-4-0613")
154
- elif "mistral-" in model or "mixtral-" in model:
155
- logger.info("Mistral.AI models are not supported in tiktoken. Returning num tokens assuming gpt-4-0613.")
156
- return _num_token_from_messages(messages, model="gpt-4-0613")
157
- else:
158
- raise NotImplementedError(
159
- f"""_num_token_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
160
- )
161
- num_tokens = 0
162
- for message in messages:
163
- num_tokens += tokens_per_message
164
- for key, value in message.items():
165
- if value is None:
166
- continue
167
-
168
- # handle content if images are in GPT-4-vision
169
- if key == "content" and isinstance(value, list):
170
- for part in value:
171
- if not isinstance(part, dict) or "type" not in part:
172
- continue
173
- if part["type"] == "text":
174
- num_tokens += len(encoding.encode(part["text"]))
175
- if "image_url" in part:
176
- assert "url" in part["image_url"]
177
- if not img_util_imported and not logger.img_dependency_warned:
178
- logger.warning(
179
- "img_utils or PIL not imported. Skipping image token count."
180
- "Please install autogen with [lmm] option.",
181
- )
182
- logger.img_dependency_warned = True
183
- is_low_quality = "detail" in part["image_url"] and part["image_url"]["detail"] == "low"
184
- try:
185
- num_tokens += num_tokens_from_gpt_image(
186
- image_data=part["image_url"]["url"], model=model, low_quality=is_low_quality
187
- )
188
- except ValueError as e:
189
- logger.warning(f"Error in num_tokens_from_gpt_image: {e}")
190
- continue
191
-
192
- # function calls
193
- if not isinstance(value, str):
194
- try:
195
- value = json.dumps(value)
196
- except TypeError:
197
- logger.warning(
198
- f"Value {value} is not a string and cannot be converted to json. It is a type: {type(value)} Skipping."
199
- )
200
- continue
201
-
202
- num_tokens += len(encoding.encode(value))
203
- if key == "name":
204
- num_tokens += tokens_per_name
205
- num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
206
- return num_tokens
207
-
208
-
209
- def num_tokens_from_functions(functions, model="gpt-3.5-turbo-0613") -> int:
210
- """Return the number of tokens used by a list of functions.
211
-
212
- Args:
213
- functions: (list): List of function descriptions that will be passed in model.
214
- model: (str): Model name.
215
-
216
- Returns:
217
- int: Number of tokens from the function descriptions.
218
- """
219
- try:
220
- encoding = tiktoken.encoding_for_model(model)
221
- except KeyError:
222
- logger.warning(f"Model {model} not found. Using cl100k_base encoding.")
223
- encoding = tiktoken.get_encoding("cl100k_base")
224
-
225
- num_tokens = 0
226
- for function in functions:
227
- function_tokens = len(encoding.encode(function["name"]))
228
- function_tokens += len(encoding.encode(function["description"]))
229
- function_tokens -= 2
230
- if "parameters" in function:
231
- parameters = function["parameters"]
232
- if "properties" in parameters:
233
- for propertiesKey in parameters["properties"]:
234
- function_tokens += len(encoding.encode(propertiesKey))
235
- v = parameters["properties"][propertiesKey]
236
- for field in v:
237
- if field == "type":
238
- function_tokens += 2
239
- function_tokens += len(encoding.encode(v["type"]))
240
- elif field == "description":
241
- function_tokens += 2
242
- function_tokens += len(encoding.encode(v["description"]))
243
- elif field == "enum":
244
- function_tokens -= 3
245
- for o in v["enum"]:
246
- function_tokens += 3
247
- function_tokens += len(encoding.encode(o))
248
- else:
249
- logger.warning(f"Not supported field {field}")
250
- function_tokens += 11
251
- if len(parameters["properties"]) == 0:
252
- function_tokens -= 2
253
-
254
- num_tokens += function_tokens
255
-
256
- num_tokens += 12
257
- return num_tokens
autogen/types.py DELETED
@@ -1,20 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- from typing import Dict, List, Literal, TypedDict, Union
8
-
9
- MessageContentType = Union[str, List[Union[Dict, str]], None]
10
-
11
-
12
- class UserMessageTextContentPart(TypedDict):
13
- type: Literal["text"]
14
- text: str
15
-
16
-
17
- class UserMessageImageContentPart(TypedDict):
18
- type: Literal["image_url"]
19
- # Ignoring the other "detail param for now"
20
- image_url: Dict[Literal["url"], str]
autogen/version.py DELETED
@@ -1,7 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- __version__ = "0.4b1"
File without changes
File without changes
File without changes