ag2 0.4.1__py3-none-any.whl → 0.4.2b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic; see the release advisory for more details.

Files changed (161)
  1. ag2-0.4.2b1.dist-info/METADATA +19 -0
  2. ag2-0.4.2b1.dist-info/RECORD +6 -0
  3. ag2-0.4.2b1.dist-info/top_level.txt +1 -0
  4. ag2-0.4.1.dist-info/METADATA +0 -500
  5. ag2-0.4.1.dist-info/RECORD +0 -158
  6. ag2-0.4.1.dist-info/top_level.txt +0 -1
  7. autogen/__init__.py +0 -17
  8. autogen/_pydantic.py +0 -116
  9. autogen/agentchat/__init__.py +0 -42
  10. autogen/agentchat/agent.py +0 -142
  11. autogen/agentchat/assistant_agent.py +0 -85
  12. autogen/agentchat/chat.py +0 -306
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +0 -788
  15. autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
  16. autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
  17. autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
  18. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
  19. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
  20. autogen/agentchat/contrib/agent_eval/task.py +0 -43
  21. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  22. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  23. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  24. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  25. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  26. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  27. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  28. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  29. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  30. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  31. autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
  32. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
  33. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
  34. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
  35. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
  36. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
  37. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
  38. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
  39. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
  40. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
  41. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
  42. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
  43. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
  44. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
  45. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
  49. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
  50. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
  51. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
  52. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
  53. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
  54. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
  55. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
  56. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
  57. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
  58. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
  59. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
  60. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
  61. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
  62. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
  63. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
  64. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
  65. autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
  66. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
  67. autogen/agentchat/contrib/captainagent.py +0 -490
  68. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  69. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  70. autogen/agentchat/contrib/graph_rag/document.py +0 -30
  71. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
  72. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
  73. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
  74. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
  75. autogen/agentchat/contrib/img_utils.py +0 -390
  76. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  77. autogen/agentchat/contrib/llava_agent.py +0 -176
  78. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  79. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  80. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  81. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  82. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
  83. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  84. autogen/agentchat/contrib/swarm_agent.py +0 -463
  85. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  86. autogen/agentchat/contrib/tool_retriever.py +0 -120
  87. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  88. autogen/agentchat/contrib/vectordb/base.py +0 -243
  89. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  90. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  91. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  92. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  93. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  94. autogen/agentchat/contrib/web_surfer.py +0 -305
  95. autogen/agentchat/conversable_agent.py +0 -2908
  96. autogen/agentchat/groupchat.py +0 -1668
  97. autogen/agentchat/user_proxy_agent.py +0 -109
  98. autogen/agentchat/utils.py +0 -207
  99. autogen/browser_utils.py +0 -291
  100. autogen/cache/__init__.py +0 -10
  101. autogen/cache/abstract_cache_base.py +0 -78
  102. autogen/cache/cache.py +0 -182
  103. autogen/cache/cache_factory.py +0 -85
  104. autogen/cache/cosmos_db_cache.py +0 -150
  105. autogen/cache/disk_cache.py +0 -109
  106. autogen/cache/in_memory_cache.py +0 -61
  107. autogen/cache/redis_cache.py +0 -128
  108. autogen/code_utils.py +0 -745
  109. autogen/coding/__init__.py +0 -22
  110. autogen/coding/base.py +0 -113
  111. autogen/coding/docker_commandline_code_executor.py +0 -262
  112. autogen/coding/factory.py +0 -45
  113. autogen/coding/func_with_reqs.py +0 -203
  114. autogen/coding/jupyter/__init__.py +0 -22
  115. autogen/coding/jupyter/base.py +0 -32
  116. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  117. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  118. autogen/coding/jupyter/jupyter_client.py +0 -224
  119. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  120. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  121. autogen/coding/local_commandline_code_executor.py +0 -410
  122. autogen/coding/markdown_code_extractor.py +0 -44
  123. autogen/coding/utils.py +0 -57
  124. autogen/exception_utils.py +0 -46
  125. autogen/extensions/__init__.py +0 -0
  126. autogen/formatting_utils.py +0 -76
  127. autogen/function_utils.py +0 -362
  128. autogen/graph_utils.py +0 -148
  129. autogen/io/__init__.py +0 -15
  130. autogen/io/base.py +0 -105
  131. autogen/io/console.py +0 -43
  132. autogen/io/websockets.py +0 -213
  133. autogen/logger/__init__.py +0 -11
  134. autogen/logger/base_logger.py +0 -140
  135. autogen/logger/file_logger.py +0 -287
  136. autogen/logger/logger_factory.py +0 -29
  137. autogen/logger/logger_utils.py +0 -42
  138. autogen/logger/sqlite_logger.py +0 -459
  139. autogen/math_utils.py +0 -356
  140. autogen/oai/__init__.py +0 -33
  141. autogen/oai/anthropic.py +0 -428
  142. autogen/oai/bedrock.py +0 -606
  143. autogen/oai/cerebras.py +0 -270
  144. autogen/oai/client.py +0 -1148
  145. autogen/oai/client_utils.py +0 -167
  146. autogen/oai/cohere.py +0 -453
  147. autogen/oai/completion.py +0 -1216
  148. autogen/oai/gemini.py +0 -469
  149. autogen/oai/groq.py +0 -281
  150. autogen/oai/mistral.py +0 -279
  151. autogen/oai/ollama.py +0 -582
  152. autogen/oai/openai_utils.py +0 -811
  153. autogen/oai/together.py +0 -343
  154. autogen/retrieve_utils.py +0 -487
  155. autogen/runtime_logging.py +0 -163
  156. autogen/token_count_utils.py +0 -259
  157. autogen/types.py +0 -20
  158. autogen/version.py +0 -7
  159. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
  160. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
  161. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
autogen/oai/bedrock.py DELETED
@@ -1,606 +0,0 @@
1
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
- #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
- # SPDX-License-Identifier: MIT
7
- """
8
- Create a compatible client for the Amazon Bedrock Converse API.
9
-
10
- Example usage:
11
- Install the `boto3` package by running `pip install --upgrade boto3`.
12
- - https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html
13
-
14
- import autogen
15
-
16
- config_list = [
17
- {
18
- "api_type": "bedrock",
19
- "model": "meta.llama3-1-8b-instruct-v1:0",
20
- "aws_region": "us-west-2",
21
- "aws_access_key": "",
22
- "aws_secret_key": "",
23
- "price" : [0.003, 0.015]
24
- }
25
- ]
26
-
27
- assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
28
-
29
- """
30
-
31
- from __future__ import annotations
32
-
33
- import base64
34
- import json
35
- import os
36
- import re
37
- import time
38
- import warnings
39
- from typing import Any, Dict, List, Literal, Tuple
40
-
41
- import boto3
42
- import requests
43
- from botocore.config import Config
44
- from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
45
- from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
46
- from openai.types.completion_usage import CompletionUsage
47
-
48
- from autogen.oai.client_utils import validate_parameter
49
-
50
-
51
class BedrockClient:
    """Client for Amazon's Bedrock Converse API.

    Translates OAI-style chat requests into Bedrock Converse calls (see
    `create`) and maps responses back into OpenAI `ChatCompletion` objects,
    so agents can consume Bedrock models through the same interface.
    """

    # Max attempts handed to botocore's "standard" retry mode.
    _retries = 5

    def __init__(self, **kwargs: Any):
        """
        Initialises BedrockClient for Amazon's Bedrock Converse API.

        Credentials and region are taken from kwargs first
        (aws_access_key, aws_secret_key, aws_session_token, aws_region,
        aws_profile_name); each credential falls back to the matching
        environment variable (AWS_ACCESS_KEY, AWS_SECRET_KEY,
        AWS_SESSION_TOKEN, AWS_REGION) when not supplied.

        Raises:
            ValueError: If no region is provided via kwargs or AWS_REGION.
        """
        self._aws_access_key = kwargs.get("aws_access_key", None)
        self._aws_secret_key = kwargs.get("aws_secret_key", None)
        self._aws_session_token = kwargs.get("aws_session_token", None)
        self._aws_region = kwargs.get("aws_region", None)
        self._aws_profile_name = kwargs.get("aws_profile_name", None)

        # Fall back to environment variables for anything not passed in.
        if not self._aws_access_key:
            self._aws_access_key = os.getenv("AWS_ACCESS_KEY")

        if not self._aws_secret_key:
            self._aws_secret_key = os.getenv("AWS_SECRET_KEY")

        if not self._aws_session_token:
            self._aws_session_token = os.getenv("AWS_SESSION_TOKEN")

        if not self._aws_region:
            self._aws_region = os.getenv("AWS_REGION")

        if self._aws_region is None:
            raise ValueError("Region is required to use the Amazon Bedrock API.")

        # Initialize Bedrock client, session, and runtime
        bedrock_config = Config(
            region_name=self._aws_region,
            signature_version="v4",
            retries={"max_attempts": self._retries, "mode": "standard"},
        )

        # None values here defer to boto3's default credential chain.
        session = boto3.Session(
            aws_access_key_id=self._aws_access_key,
            aws_secret_access_key=self._aws_secret_key,
            aws_session_token=self._aws_session_token,
            profile_name=self._aws_profile_name,
        )

        self.bedrock_runtime = session.client(service_name="bedrock-runtime", config=bedrock_config)

    def message_retrieval(self, response):
        """Retrieve the messages from the response (one per choice)."""
        return [choice.message for choice in response.choices]

    def parse_custom_params(self, params: Dict[str, Any]):
        """
        Parses custom parameters for logic in this client class.

        Args:
            params: The request config; only "supports_system_prompts" is read.
        """

        # Should we separate system messages into its own request parameter, default is True
        # This is required because not all models support a system prompt (e.g. Mistral Instruct).
        self._supports_system_prompts = params.get("supports_system_prompts", True)

    def parse_params(self, params: Dict[str, Any]) -> tuple[Dict[str, Any], Dict[str, Any]]:
        """
        Loads the valid parameters required to invoke Bedrock Converse.

        Also caches `self._model_id` and `self._streaming` as side effects.

        Args:
            params: The request config ("model" is mandatory).

        Returns:
            A tuple of (base_params, additional_params), mapping to Bedrock's
            inferenceConfig and additionalModelRequestFields respectively.
        """

        base_params = {}
        additional_params = {}

        # Amazon Bedrock base model IDs are here:
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
        self._model_id = params.get("model", None)
        assert self._model_id, "Please provide the 'model` in the config_list to use Amazon Bedrock"

        # Parameters vary based on the model used.
        # As we won't cater for all models and parameters, it's the developer's
        # responsibility to implement the parameters and they will only be
        # included if the developer has it in the config.
        #
        # Important:
        # No defaults will be used (as they can vary per model)
        # No ranges will be used (as they can vary)
        # We will cover all the main parameters but there may be others
        # that need to be added later
        #
        # Here are some pages that show the parameters available for different models
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-text.html
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-text-completion.html
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-cohere-command-r-plus.html
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral-chat-completion.html

        # Here are the possible "base" parameters and their suitable types
        # (single types don't need a tuple — (int) is just int)
        base_parameters = [["temperature", (float, int)], ["topP", (float, int)], ["maxTokens", (int)]]

        for param_name, suitable_types in base_parameters:
            if param_name in params:
                base_params[param_name] = validate_parameter(
                    params, param_name, suitable_types, False, None, None, None
                )

        # Here are the possible "model-specific" parameters and their suitable types, known as additional parameters
        additional_parameters = [
            ["top_p", (float, int)],
            ["top_k", (int)],
            ["k", (int)],
            ["seed", (int)],
        ]

        for param_name, suitable_types in additional_parameters:
            if param_name in params:
                additional_params[param_name] = validate_parameter(
                    params, param_name, suitable_types, False, None, None, None
                )

        # Streaming
        if "stream" in params:
            self._streaming = params["stream"]
        else:
            self._streaming = False

        # For this release we will not support streaming as many models do not support streaming with tool use
        if self._streaming:
            warnings.warn(
                "Streaming is not currently supported, streaming will be disabled.",
                UserWarning,
            )
            self._streaming = False

        return base_params, additional_params

    def create(self, params):
        """Run Amazon Bedrock inference and return AutoGen response.

        Builds a Converse request from the OAI-format `params` (messages,
        optional tools, optional system prompt), invokes the runtime, and
        converts the result into an OpenAI `ChatCompletion`.
        """

        # Set custom client class settings
        self.parse_custom_params(params)

        # Parse the inference parameters
        base_params, additional_params = self.parse_params(params)

        has_tools = "tools" in params
        messages = oai_messages_to_bedrock_messages(params["messages"], has_tools, self._supports_system_prompts)

        # System messages go in a separate request field when supported;
        # otherwise oai_messages_to_bedrock_messages folded them into users.
        if self._supports_system_prompts:
            system_messages = extract_system_messages(params["messages"])

        tool_config = format_tools(params["tools"] if has_tools else [])

        request_args = {"messages": messages, "modelId": self._model_id}

        # Base and additional args
        if len(base_params) > 0:
            request_args["inferenceConfig"] = base_params

        if len(additional_params) > 0:
            request_args["additionalModelRequestFields"] = additional_params

        if self._supports_system_prompts:
            request_args["system"] = system_messages

        if len(tool_config["tools"]) > 0:
            request_args["toolConfig"] = tool_config

        response = self.bedrock_runtime.converse(**request_args)
        if response is None:
            raise RuntimeError(f"Failed to get response from Bedrock after retrying {self._retries} times.")

        finish_reason = convert_stop_reason_to_finish_reason(response["stopReason"])
        response_message = response["output"]["message"]

        if finish_reason == "tool_calls":
            tool_calls = format_tool_calls(response_message["content"])
        else:
            tool_calls = None

        # Keep the last text block found in the content.
        text = ""
        for content in response_message["content"]:
            if "text" in content:
                text = content["text"]
            # NOTE: other types of output may be dealt with here

        message = ChatCompletionMessage(role="assistant", content=text, tool_calls=tool_calls)

        response_usage = response["usage"]
        usage = CompletionUsage(
            prompt_tokens=response_usage["inputTokens"],
            completion_tokens=response_usage["outputTokens"],
            total_tokens=response_usage["totalTokens"],
        )

        return ChatCompletion(
            id=response["ResponseMetadata"]["RequestId"],
            choices=[Choice(finish_reason=finish_reason, index=0, message=message)],
            created=int(time.time()),
            model=self._model_id,
            object="chat.completion",
            usage=usage,
        )

    def cost(self, response: ChatCompletion) -> float:
        """Calculate the cost of the response (USD) from its token usage."""
        return calculate_cost(response.usage.prompt_tokens, response.usage.completion_tokens, response.model)

    @staticmethod
    def get_usage(response) -> Dict:
        """Get the usage of tokens and their cost information."""
        return {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens,
            "cost": response.cost,
            "model": response.model,
        }
264
-
265
-
266
def extract_system_messages(messages: List[dict]) -> List:
    """Extract the first system message as a Bedrock system content block.

    Only the first message with role "system" is used; any later system
    messages are ignored. String content is wrapped directly; list content
    contributes only its first text part.

    Args:
        messages (list[dict]): List of messages in OAI format.

    Returns:
        List: A single-element list of the form ``[{"text": ...}]``, or an
        empty list when no system message is present.
    """
    for message in messages:
        if message.get("role") == "system":
            if isinstance(message["content"], str):
                return [{"text": message.get("content")}]
            else:
                return [{"text": message.get("content")[0]["text"]}]
    return []
288
-
289
-
290
def oai_messages_to_bedrock_messages(
    messages: List[Dict[str, Any]], has_tools: bool, supports_system_prompts: bool
) -> List[Dict]:
    """
    Convert messages from OAI format to Bedrock format.

    We correct for any specific role orders and types, etc.
    AWS Bedrock requires messages to alternate between user and assistant roles. This function ensures that the messages
    are in the correct order and format for Bedrock by inserting "Please continue" messages as needed.
    This is the same method as the one in the Autogen Anthropic client.

    Args:
        messages: Messages in OAI chat format.
        has_tools: Whether a "tools" parameter accompanies this request. If not, tool
            use / result messages are converted to plain text messages, because Bedrock
            rejects tool blocks unless tools are declared (this can occur when tool
            calling isn't needed, such as group chat speaker selection).
        supports_system_prompts: If True, system messages are removed here (the caller
            sends them separately); if False, they are re-labelled as "user" messages.
            NOTE: in the False case the incoming message dicts are mutated in place.

    Returns:
        List of Bedrock-format message dicts with alternating user/assistant roles,
        always ending with a user message.
    """

    # Take out system messages if the model supports it, otherwise leave them in.
    if supports_system_prompts:
        messages = [x for x in messages if not x["role"] == "system"]
    else:
        # Replace role="system" with role="user"
        for msg in messages:
            if msg["role"] == "system":
                msg["role"] = "user"

    processed_messages = []

    # Used to interweave user messages to ensure user/assistant alternating
    user_continue_message = {"content": [{"text": "Please continue."}], "role": "user"}
    assistant_continue_message = {
        "content": [{"text": "Please continue."}],
        "role": "assistant",
    }

    tool_use_messages = 0
    tool_result_messages = 0
    last_tool_use_index = -1
    last_tool_result_index = -1
    for message in messages:
        # New messages will be added here, manage role alternations
        expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"

        if "tool_calls" in message:
            # Map the tool call options to Bedrock's format
            tool_uses = []
            tool_names = []
            for tool_call in message["tool_calls"]:
                tool_uses.append(
                    {
                        "toolUse": {
                            "toolUseId": tool_call["id"],
                            "name": tool_call["function"]["name"],
                            "input": json.loads(tool_call["function"]["arguments"]),
                        }
                    }
                )
                if has_tools:
                    tool_use_messages += 1
                tool_names.append(tool_call["function"]["name"])

            if expected_role == "user":
                # Insert an extra user message as we will append an assistant message
                processed_messages.append(user_continue_message)

            if has_tools:
                processed_messages.append({"role": "assistant", "content": tool_uses})
                last_tool_use_index = len(processed_messages) - 1
            else:
                # Not using tools, so put in a plain text message
                processed_messages.append(
                    {
                        "role": "assistant",
                        "content": [
                            {"text": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]"}
                        ],
                    }
                )
        elif "tool_call_id" in message:
            if has_tools:
                # Map the tool usage call to tool_result for Bedrock
                tool_result = {
                    "toolResult": {
                        "toolUseId": message["tool_call_id"],
                        "content": [{"text": message["content"]}],
                    }
                }

                # If the previous message also had a tool_result, add it to that
                # Otherwise append a new message
                # (the explicit -1 check prevents matching an empty list, where
                # len(processed_messages) - 1 would also be -1)
                if last_tool_result_index != -1 and last_tool_result_index == len(processed_messages) - 1:
                    processed_messages[-1]["content"].append(tool_result)
                else:
                    if expected_role == "assistant":
                        # Insert an extra assistant message as we will append a user message
                        processed_messages.append(assistant_continue_message)

                    processed_messages.append({"role": "user", "content": [tool_result]})
                    last_tool_result_index = len(processed_messages) - 1

                tool_result_messages += 1
            else:
                # Not using tools, so put in a plain text message
                processed_messages.append(
                    {
                        "role": "user",
                        "content": [{"text": f"Running the function returned: {message['content']}"}],
                    }
                )
        elif message["content"] == "":
            # Ignoring empty messages
            pass
        else:
            if expected_role != message["role"] and not (len(processed_messages) == 0 and message["role"] == "system"):
                # Inserting the alternating continue message (ignore if it's the first message and a system message)
                processed_messages.append(
                    user_continue_message if expected_role == "user" else assistant_continue_message
                )

            processed_messages.append(
                {
                    "role": message["role"],
                    "content": parse_content_parts(message=message),
                }
            )

    # We'll replace the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
    # (guard on the index so an imbalance with no recorded tool_use can't clobber the final message)
    if has_tools and tool_use_messages != tool_result_messages and last_tool_use_index != -1:
        processed_messages[last_tool_use_index] = assistant_continue_message

    # name is not a valid field on messages
    for message in processed_messages:
        if "name" in message:
            message.pop("name", None)

    # Note: When using reflection_with_llm we may end up with an "assistant" message as the last message and that may cause a blank response
    # So, if the last role is not user, add a 'user' continue message at the end
    # (the emptiness check avoids an IndexError when no messages survive conversion)
    if not processed_messages or processed_messages[-1]["role"] != "user":
        processed_messages.append(user_continue_message)

    return processed_messages
432
-
433
-
434
def parse_content_parts(
    message: Dict[str, Any],
) -> List[dict]:
    """Convert an OAI message's content into Bedrock content blocks.

    Plain-string content becomes a single text block. For list content, each
    part with a "text" key becomes a text block and each part with an
    "image_url" key is fetched/decoded via `parse_image` into an image block;
    anything else is silently dropped.
    """
    content = message.get("content")

    # Simple case: a bare string is one text block.
    if isinstance(content, str):
        return [{"text": content}]

    blocks = []
    for part in content:
        if "text" in part:
            blocks.append({"text": part.get("text")})
        elif "image_url" in part:
            raw_bytes, mime_type = parse_image(part.get("image_url").get("url"))
            blocks.append(
                {
                    "image": {
                        # Strip the leading "image/" from the MIME type.
                        "format": mime_type[6:],
                        "source": {"bytes": raw_bytes},
                    },
                }
            )
        # Unrecognised part shapes are ignored.
    return blocks
467
-
468
-
469
def parse_image(image_url: str) -> Tuple[bytes, str]:
    """Try to get the raw data from an image url.

    Ref: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ImageSource.html
    returns a tuple of (Image Data, Content Type)
    """
    # Already base64 encoded as a data URL?
    # Only supports 'image/jpeg', 'image/png', 'image/gif' or 'image/webp'
    pattern = r"^data:(image/[a-z]*);base64,\s*"
    match = re.search(pattern, image_url)
    if match:
        payload = re.sub(pattern, "", image_url)
        return base64.b64decode(payload), match.group(1)

    # Otherwise fetch the bytes over HTTP.
    response = requests.get(image_url)
    if response.status_code != 200:
        raise RuntimeError("Unable to access the image url")

    mime_type = response.headers.get("Content-Type")
    # Fall back to JPEG when the server reports a non-image type.
    if not mime_type.startswith("image"):
        mime_type = "image/jpeg"
    return response.content, mime_type
496
-
497
-
498
def format_tools(tools: List[Dict[str, Any]]) -> Dict[Literal["tools"], List[Dict[str, Any]]]:
    """Translate OAI function-tool definitions into Bedrock's toolConfig schema.

    Non-"function" tool entries are skipped. Each property carries over its
    type and description, plus enum/default when present; the top-level
    "required" list is copied when provided, otherwise left empty.
    """
    bedrock_tools: List[Dict[str, Any]] = []

    for tool in tools:
        if tool["type"] != "function":
            continue
        function = tool["function"]

        # Assemble the JSON schema for this tool's input parameters.
        json_schema: Dict[str, Any] = {"type": "object", "properties": {}, "required": []}
        for prop_name, prop_details in function["parameters"]["properties"].items():
            prop_schema: Dict[str, Any] = {
                "type": prop_details["type"],
                "description": prop_details.get("description", ""),
            }
            if "enum" in prop_details:
                prop_schema["enum"] = prop_details["enum"]
            if "default" in prop_details:
                prop_schema["default"] = prop_details["default"]
            json_schema["properties"][prop_name] = prop_schema

        if "required" in function["parameters"]:
            json_schema["required"] = function["parameters"]["required"]

        bedrock_tools.append(
            {
                "toolSpec": {
                    "name": function["name"],
                    "description": function["description"],
                    "inputSchema": {"json": json_schema},
                }
            }
        )

    return {"tools": bedrock_tools}
532
-
533
-
534
def format_tool_calls(content):
    """Converts Converse API response tool calls to AutoGen format.

    Scans the response content blocks and turns every "toolUse" block into a
    ChatCompletionMessageToolCall; blocks without "toolUse" are skipped.
    """
    calls = []
    for block in content:
        if "toolUse" not in block:
            continue
        tool = block["toolUse"]
        calls.append(
            ChatCompletionMessageToolCall(
                id=tool["toolUseId"],
                # Bedrock gives the arguments as a dict; OAI expects a JSON string.
                function={
                    "name": tool["name"],
                    "arguments": json.dumps(tool["input"]),
                },
                type="function",
            )
        )
    return calls
552
-
553
-
554
def convert_stop_reason_to_finish_reason(
    stop_reason: str,
) -> Literal["stop", "length", "tool_calls", "content_filter"]:
    """
    Converts Bedrock finish reasons to our finish reasons, according to OpenAI:

    - stop: if the model hit a natural stop point or a provided stop sequence,
    - length: if the maximum number of tokens specified in the request was reached,
    - content_filter: if content was omitted due to a flag from our content filters,
    - tool_calls: if the model called a tool

    An unmapped stop reason is warned about and passed through lower-cased;
    a falsy stop_reason yields None.
    """
    if stop_reason:
        finish_reason_mapping = {
            "tool_use": "tool_calls",
            "finished": "stop",
            "end_turn": "stop",
            "max_tokens": "length",
            "stop_sequence": "stop",
            "complete": "stop",
            "content_filtered": "content_filter",
        }
        normalized = stop_reason.lower()
        if normalized not in finish_reason_mapping:
            # Previously this warning was unreachable for real unmapped reasons
            # (it only fired for falsy input); warn here where it matters.
            warnings.warn(f"Unsupported stop reason: {stop_reason}", UserWarning)
        return finish_reason_mapping.get(normalized, normalized)

    warnings.warn(f"Unsupported stop reason: {stop_reason}", UserWarning)
    return None
579
-
580
-
581
# NOTE: As this will be quite dynamic, it's expected that the developer will use the "price" parameter in their config
# These may be removed.
# Values are (input price, output price) in USD per 1K tokens.
PRICES_PER_K_TOKENS = {
    "meta.llama3-8b-instruct-v1:0": (0.0003, 0.0006),
    "meta.llama3-70b-instruct-v1:0": (0.00265, 0.0035),
    "mistral.mistral-7b-instruct-v0:2": (0.00015, 0.0002),
    "mistral.mixtral-8x7b-instruct-v0:1": (0.00045, 0.0007),
    "mistral.mistral-large-2402-v1:0": (0.004, 0.012),
    "mistral.mistral-small-2402-v1:0": (0.001, 0.003),
}


def calculate_cost(input_tokens: int, output_tokens: int, model_id: str) -> float:
    """Calculate the cost of the completion using the Bedrock pricing.

    Returns 0 (with a UserWarning) for models missing from PRICES_PER_K_TOKENS,
    in which case the developer should supply a "price" in their config.
    """
    if model_id not in PRICES_PER_K_TOKENS:
        warnings.warn(
            f'Cannot get the costs for {model_id}. The cost will be 0. In your config_list, add field {{"price" : [prompt_price_per_1k, completion_token_price_per_1k]}} for customized pricing.',
            UserWarning,
        )
        return 0

    input_rate, output_rate = PRICES_PER_K_TOKENS[model_id]
    return (input_tokens / 1000) * input_rate + (output_tokens / 1000) * output_rate