ag2 0.4.1__py3-none-any.whl → 0.4.2b1__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (161)
  1. ag2-0.4.2b1.dist-info/METADATA +19 -0
  2. ag2-0.4.2b1.dist-info/RECORD +6 -0
  3. ag2-0.4.2b1.dist-info/top_level.txt +1 -0
  4. ag2-0.4.1.dist-info/METADATA +0 -500
  5. ag2-0.4.1.dist-info/RECORD +0 -158
  6. ag2-0.4.1.dist-info/top_level.txt +0 -1
  7. autogen/__init__.py +0 -17
  8. autogen/_pydantic.py +0 -116
  9. autogen/agentchat/__init__.py +0 -42
  10. autogen/agentchat/agent.py +0 -142
  11. autogen/agentchat/assistant_agent.py +0 -85
  12. autogen/agentchat/chat.py +0 -306
  13. autogen/agentchat/contrib/__init__.py +0 -0
  14. autogen/agentchat/contrib/agent_builder.py +0 -788
  15. autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
  16. autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
  17. autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
  18. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
  19. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
  20. autogen/agentchat/contrib/agent_eval/task.py +0 -43
  21. autogen/agentchat/contrib/agent_optimizer.py +0 -450
  22. autogen/agentchat/contrib/capabilities/__init__.py +0 -0
  23. autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
  24. autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
  25. autogen/agentchat/contrib/capabilities/teachability.py +0 -406
  26. autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
  27. autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
  28. autogen/agentchat/contrib/capabilities/transforms.py +0 -565
  29. autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
  30. autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
  31. autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
  32. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
  33. autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
  34. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
  35. autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
  36. autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
  37. autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
  38. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
  39. autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
  40. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
  41. autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
  42. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
  43. autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
  44. autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
  45. autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
  46. autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
  47. autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
  48. autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
  49. autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
  50. autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
  51. autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
  52. autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
  53. autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
  54. autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
  55. autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
  56. autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
  57. autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
  58. autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
  59. autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
  60. autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
  61. autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
  62. autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
  63. autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
  64. autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
  65. autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
  66. autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
  67. autogen/agentchat/contrib/captainagent.py +0 -490
  68. autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
  69. autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
  70. autogen/agentchat/contrib/graph_rag/document.py +0 -30
  71. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
  72. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
  73. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
  74. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
  75. autogen/agentchat/contrib/img_utils.py +0 -390
  76. autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
  77. autogen/agentchat/contrib/llava_agent.py +0 -176
  78. autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
  79. autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
  80. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
  81. autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
  82. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
  83. autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
  84. autogen/agentchat/contrib/swarm_agent.py +0 -463
  85. autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
  86. autogen/agentchat/contrib/tool_retriever.py +0 -120
  87. autogen/agentchat/contrib/vectordb/__init__.py +0 -0
  88. autogen/agentchat/contrib/vectordb/base.py +0 -243
  89. autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
  90. autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
  91. autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
  92. autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
  93. autogen/agentchat/contrib/vectordb/utils.py +0 -126
  94. autogen/agentchat/contrib/web_surfer.py +0 -305
  95. autogen/agentchat/conversable_agent.py +0 -2908
  96. autogen/agentchat/groupchat.py +0 -1668
  97. autogen/agentchat/user_proxy_agent.py +0 -109
  98. autogen/agentchat/utils.py +0 -207
  99. autogen/browser_utils.py +0 -291
  100. autogen/cache/__init__.py +0 -10
  101. autogen/cache/abstract_cache_base.py +0 -78
  102. autogen/cache/cache.py +0 -182
  103. autogen/cache/cache_factory.py +0 -85
  104. autogen/cache/cosmos_db_cache.py +0 -150
  105. autogen/cache/disk_cache.py +0 -109
  106. autogen/cache/in_memory_cache.py +0 -61
  107. autogen/cache/redis_cache.py +0 -128
  108. autogen/code_utils.py +0 -745
  109. autogen/coding/__init__.py +0 -22
  110. autogen/coding/base.py +0 -113
  111. autogen/coding/docker_commandline_code_executor.py +0 -262
  112. autogen/coding/factory.py +0 -45
  113. autogen/coding/func_with_reqs.py +0 -203
  114. autogen/coding/jupyter/__init__.py +0 -22
  115. autogen/coding/jupyter/base.py +0 -32
  116. autogen/coding/jupyter/docker_jupyter_server.py +0 -164
  117. autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
  118. autogen/coding/jupyter/jupyter_client.py +0 -224
  119. autogen/coding/jupyter/jupyter_code_executor.py +0 -161
  120. autogen/coding/jupyter/local_jupyter_server.py +0 -168
  121. autogen/coding/local_commandline_code_executor.py +0 -410
  122. autogen/coding/markdown_code_extractor.py +0 -44
  123. autogen/coding/utils.py +0 -57
  124. autogen/exception_utils.py +0 -46
  125. autogen/extensions/__init__.py +0 -0
  126. autogen/formatting_utils.py +0 -76
  127. autogen/function_utils.py +0 -362
  128. autogen/graph_utils.py +0 -148
  129. autogen/io/__init__.py +0 -15
  130. autogen/io/base.py +0 -105
  131. autogen/io/console.py +0 -43
  132. autogen/io/websockets.py +0 -213
  133. autogen/logger/__init__.py +0 -11
  134. autogen/logger/base_logger.py +0 -140
  135. autogen/logger/file_logger.py +0 -287
  136. autogen/logger/logger_factory.py +0 -29
  137. autogen/logger/logger_utils.py +0 -42
  138. autogen/logger/sqlite_logger.py +0 -459
  139. autogen/math_utils.py +0 -356
  140. autogen/oai/__init__.py +0 -33
  141. autogen/oai/anthropic.py +0 -428
  142. autogen/oai/bedrock.py +0 -606
  143. autogen/oai/cerebras.py +0 -270
  144. autogen/oai/client.py +0 -1148
  145. autogen/oai/client_utils.py +0 -167
  146. autogen/oai/cohere.py +0 -453
  147. autogen/oai/completion.py +0 -1216
  148. autogen/oai/gemini.py +0 -469
  149. autogen/oai/groq.py +0 -281
  150. autogen/oai/mistral.py +0 -279
  151. autogen/oai/ollama.py +0 -582
  152. autogen/oai/openai_utils.py +0 -811
  153. autogen/oai/together.py +0 -343
  154. autogen/retrieve_utils.py +0 -487
  155. autogen/runtime_logging.py +0 -163
  156. autogen/token_count_utils.py +0 -259
  157. autogen/types.py +0 -20
  158. autogen/version.py +0 -7
  159. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
  160. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
  161. {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
autogen/oai/anthropic.py DELETED
@@ -1,428 +0,0 @@
- # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
- #
- # SPDX-License-Identifier: Apache-2.0
- #
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
- # SPDX-License-Identifier: MIT
- """
- Create an OpenAI-compatible client for the Anthropic API.
-
- Example usage:
- Install the `anthropic` package by running `pip install --upgrade anthropic`.
- - https://docs.anthropic.com/en/docs/quickstart-guide
-
- import autogen
-
- config_list = [
-     {
-         "model": "claude-3-sonnet-20240229",
-         "api_key": os.getenv("ANTHROPIC_API_KEY"),
-         "api_type": "anthropic",
-     }
- ]
-
- assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
-
- Example usage for Anthropic Bedrock:
-
- Install the `anthropic` package by running `pip install --upgrade anthropic`.
- - https://docs.anthropic.com/en/docs/quickstart-guide
-
- import autogen
-
- config_list = [
-     {
-         "model": "anthropic.claude-3-5-sonnet-20240620-v1:0",
-         "aws_access_key":<accessKey>,
-         "aws_secret_key":<secretKey>,
-         "aws_session_token":<sessionTok>,
-         "aws_region":"us-east-1",
-         "api_type": "anthropic",
-     }
- ]
-
- assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
-
- """
-
- from __future__ import annotations
-
- import copy
- import inspect
- import json
- import os
- import time
- import warnings
- from typing import Any, Dict, List, Tuple, Union
-
- from anthropic import Anthropic, AnthropicBedrock
- from anthropic import __version__ as anthropic_version
- from anthropic.types import Completion, Message, TextBlock, ToolUseBlock
- from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
- from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
- from openai.types.completion_usage import CompletionUsage
- from typing_extensions import Annotated
-
- from autogen.oai.client_utils import validate_parameter
-
- TOOL_ENABLED = anthropic_version >= "0.23.1"
- if TOOL_ENABLED:
-     from anthropic.types.tool_use_block_param import (
-         ToolUseBlockParam,
-     )
-
-
- ANTHROPIC_PRICING_1k = {
-     "claude-3-5-sonnet-20240620": (0.003, 0.015),
-     "claude-3-sonnet-20240229": (0.003, 0.015),
-     "claude-3-opus-20240229": (0.015, 0.075),
-     "claude-3-haiku-20240307": (0.00025, 0.00125),
-     "claude-2.1": (0.008, 0.024),
-     "claude-2.0": (0.008, 0.024),
-     "claude-instant-1.2": (0.008, 0.024),
- }
-
-
- class AnthropicClient:
-     def __init__(self, **kwargs: Any):
-         """
-         Initialize the Anthropic API client.
-         Args:
-             api_key (str): The API key for the Anthropic API or set the `ANTHROPIC_API_KEY` environment variable.
-         """
-         self._api_key = kwargs.get("api_key", None)
-         self._aws_access_key = kwargs.get("aws_access_key", None)
-         self._aws_secret_key = kwargs.get("aws_secret_key", None)
-         self._aws_session_token = kwargs.get("aws_session_token", None)
-         self._aws_region = kwargs.get("aws_region", None)
-
-         if not self._api_key:
-             self._api_key = os.getenv("ANTHROPIC_API_KEY")
-
-         if not self._aws_access_key:
-             self._aws_access_key = os.getenv("AWS_ACCESS_KEY")
-
-         if not self._aws_secret_key:
-             self._aws_secret_key = os.getenv("AWS_SECRET_KEY")
-
-         if not self._aws_region:
-             self._aws_region = os.getenv("AWS_REGION")
-
-         if self._api_key is None and (
-             self._aws_access_key is None or self._aws_secret_key is None or self._aws_region is None
-         ):
-             raise ValueError("API key or AWS credentials are required to use the Anthropic API.")
-
-         if self._api_key is not None:
-             self._client = Anthropic(api_key=self._api_key)
-         else:
-             self._client = AnthropicBedrock(
-                 aws_access_key=self._aws_access_key,
-                 aws_secret_key=self._aws_secret_key,
-                 aws_session_token=self._aws_session_token,
-                 aws_region=self._aws_region,
-             )
-
-         self._last_tooluse_status = {}
-
-     def load_config(self, params: Dict[str, Any]):
-         """Load the configuration for the Anthropic API client."""
-         anthropic_params = {}
-
-         anthropic_params["model"] = params.get("model", None)
-         assert anthropic_params["model"], "Please provide a `model` in the config_list to use the Anthropic API."
-
-         anthropic_params["temperature"] = validate_parameter(
-             params, "temperature", (float, int), False, 1.0, (0.0, 1.0), None
-         )
-         anthropic_params["max_tokens"] = validate_parameter(params, "max_tokens", int, False, 4096, (1, None), None)
-         anthropic_params["top_k"] = validate_parameter(params, "top_k", int, True, None, (1, None), None)
-         anthropic_params["top_p"] = validate_parameter(params, "top_p", (float, int), True, None, (0.0, 1.0), None)
-         anthropic_params["stop_sequences"] = validate_parameter(params, "stop_sequences", list, True, None, None, None)
-         anthropic_params["stream"] = validate_parameter(params, "stream", bool, False, False, None, None)
-
-         if anthropic_params["stream"]:
-             warnings.warn(
-                 "Streaming is not currently supported, streaming will be disabled.",
-                 UserWarning,
-             )
-             anthropic_params["stream"] = False
-
-         return anthropic_params
-
-     def cost(self, response) -> float:
-         """Calculate the cost of the completion using the Anthropic pricing."""
-         return response.cost
-
-     @property
-     def api_key(self):
-         return self._api_key
-
-     @property
-     def aws_access_key(self):
-         return self._aws_access_key
-
-     @property
-     def aws_secret_key(self):
-         return self._aws_secret_key
-
-     @property
-     def aws_session_token(self):
-         return self._aws_session_token
-
-     @property
-     def aws_region(self):
-         return self._aws_region
-
-     def create(self, params: Dict[str, Any]) -> Completion:
-         if "tools" in params:
-             converted_functions = self.convert_tools_to_functions(params["tools"])
-             params["functions"] = params.get("functions", []) + converted_functions
-
-         # Convert AutoGen messages to Anthropic messages
-         anthropic_messages = oai_messages_to_anthropic_messages(params)
-         anthropic_params = self.load_config(params)
-
-         # TODO: support stream
-         params = params.copy()
-         if "functions" in params:
-             tools_configs = params.pop("functions")
-             tools_configs = [self.openai_func_to_anthropic(tool) for tool in tools_configs]
-             params["tools"] = tools_configs
-
-         # Anthropic doesn't accept None values, so we need to use keyword argument unpacking instead of setting parameters.
-         # Copy params we need into anthropic_params
-         # Remove any that don't have values
-         anthropic_params["messages"] = anthropic_messages
-         if "system" in params:
-             anthropic_params["system"] = params["system"]
-         if "tools" in params:
-             anthropic_params["tools"] = params["tools"]
-         if anthropic_params["top_k"] is None:
-             del anthropic_params["top_k"]
-         if anthropic_params["top_p"] is None:
-             del anthropic_params["top_p"]
-         if anthropic_params["stop_sequences"] is None:
-             del anthropic_params["stop_sequences"]
-
-         response = self._client.messages.create(**anthropic_params)
-
-         # Calculate and save the cost onto the response
-         prompt_tokens = response.usage.input_tokens
-         completion_tokens = response.usage.output_tokens
-
-         message_text = ""
-         if response is not None:
-             # If we have tool use as the response, populate completed tool calls for our return OAI response
-             if response.stop_reason == "tool_use":
-                 anthropic_finish = "tool_calls"
-                 tool_calls = []
-                 for content in response.content:
-                     if type(content) == ToolUseBlock:
-                         tool_calls.append(
-                             ChatCompletionMessageToolCall(
-                                 id=content.id,
-                                 function={"name": content.name, "arguments": json.dumps(content.input)},
-                                 type="function",
-                             )
-                         )
-             else:
-                 anthropic_finish = "stop"
-                 tool_calls = None
-
-             # Retrieve any text content from the response
-             for content in response.content:
-                 if type(content) == TextBlock:
-                     message_text = content.text
-                     break
-
-         # Convert output back to AutoGen response format
-         message = ChatCompletionMessage(
-             role="assistant",
-             content=message_text,
-             function_call=None,
-             tool_calls=tool_calls,
-         )
-         choices = [Choice(finish_reason=anthropic_finish, index=0, message=message)]
-
-         response_oai = ChatCompletion(
-             id=response.id,
-             model=anthropic_params["model"],
-             created=int(time.time()),
-             object="chat.completion",
-             choices=choices,
-             usage=CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=prompt_tokens + completion_tokens,
-             ),
-             cost=_calculate_cost(prompt_tokens, completion_tokens, anthropic_params["model"]),
-         )
-
-         return response_oai
-
-     def message_retrieval(self, response) -> List:
-         """
-         Retrieve and return a list of strings or a list of Choice.Message from the response.
-
-         NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
-         since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
-         """
-         return [choice.message for choice in response.choices]
-
-     @staticmethod
-     def openai_func_to_anthropic(openai_func: dict) -> dict:
-         res = openai_func.copy()
-         res["input_schema"] = res.pop("parameters")
-         return res
-
-     @staticmethod
-     def get_usage(response: ChatCompletion) -> Dict:
-         """Get the usage of tokens and their cost information."""
-         return {
-             "prompt_tokens": response.usage.prompt_tokens if response.usage is not None else 0,
-             "completion_tokens": response.usage.completion_tokens if response.usage is not None else 0,
-             "total_tokens": response.usage.total_tokens if response.usage is not None else 0,
-             "cost": response.cost if hasattr(response, "cost") else 0.0,
-             "model": response.model,
-         }
-
-     @staticmethod
-     def convert_tools_to_functions(tools: List) -> List:
-         functions = []
-         for tool in tools:
-             if tool.get("type") == "function" and "function" in tool:
-                 functions.append(tool["function"])
-
-         return functions
-
-
- def oai_messages_to_anthropic_messages(params: Dict[str, Any]) -> list[dict[str, Any]]:
-     """Convert messages from OAI format to Anthropic format.
-     We correct for any specific role orders and types, etc.
-     """
-
-     # Track whether we have tools passed in. If not, tool use / result messages should be converted to text messages.
-     # Anthropic requires a tools parameter with the tools listed, if there are other messages with tool use or tool results.
-     # This can occur when we don't need tool calling, such as for group chat speaker selection.
-     has_tools = "tools" in params
-
-     # Convert messages to Anthropic compliant format
-     processed_messages = []
-
-     # Used to interweave user messages to ensure user/assistant alternating
-     user_continue_message = {"content": "Please continue.", "role": "user"}
-     assistant_continue_message = {"content": "Please continue.", "role": "assistant"}
-
-     tool_use_messages = 0
-     tool_result_messages = 0
-     last_tool_use_index = -1
-     last_tool_result_index = -1
-     for message in params["messages"]:
-         if message["role"] == "system":
-             params["system"] = params.get("system", "") + ("\n" if "system" in params else "") + message["content"]
-         else:
-             # New messages will be added here, manage role alternations
-             expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"
-
-             if "tool_calls" in message:
-                 # Map the tool call options to Anthropic's ToolUseBlock
-                 tool_uses = []
-                 tool_names = []
-                 for tool_call in message["tool_calls"]:
-                     tool_uses.append(
-                         ToolUseBlock(
-                             type="tool_use",
-                             id=tool_call["id"],
-                             name=tool_call["function"]["name"],
-                             input=json.loads(tool_call["function"]["arguments"]),
-                         )
-                     )
-                     if has_tools:
-                         tool_use_messages += 1
-                     tool_names.append(tool_call["function"]["name"])
-
-                 if expected_role == "user":
-                     # Insert an extra user message as we will append an assistant message
-                     processed_messages.append(user_continue_message)
-
-                 if has_tools:
-                     processed_messages.append({"role": "assistant", "content": tool_uses})
-                     last_tool_use_index = len(processed_messages) - 1
-                 else:
-                     # Not using tools, so put in a plain text message
-                     processed_messages.append(
-                         {
-                             "role": "assistant",
-                             "content": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]",
-                         }
-                     )
-             elif "tool_call_id" in message:
-                 if has_tools:
-                     # Map the tool usage call to tool_result for Anthropic
-                     tool_result = {
-                         "type": "tool_result",
-                         "tool_use_id": message["tool_call_id"],
-                         "content": message["content"],
-                     }
-
-                     # If the previous message also had a tool_result, add it to that
-                     # Otherwise append a new message
-                     if last_tool_result_index == len(processed_messages) - 1:
-                         processed_messages[-1]["content"].append(tool_result)
-                     else:
-                         if expected_role == "assistant":
-                             # Insert an extra assistant message as we will append a user message
-                             processed_messages.append(assistant_continue_message)
-
-                         processed_messages.append({"role": "user", "content": [tool_result]})
-                         last_tool_result_index = len(processed_messages) - 1
-
-                     tool_result_messages += 1
-                 else:
-                     # Not using tools, so put in a plain text message
-                     processed_messages.append(
-                         {"role": "user", "content": f"Running the function returned: {message['content']}"}
-                     )
-             elif message["content"] == "":
-                 # Ignoring empty messages
-                 pass
-             else:
-                 if expected_role != message["role"]:
-                     # Inserting the alternating continue message
-                     processed_messages.append(
-                         user_continue_message if expected_role == "user" else assistant_continue_message
-                     )
-
-                 processed_messages.append(message)
-
-     # We'll replace the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
-     if has_tools and tool_use_messages != tool_result_messages:
-         processed_messages[last_tool_use_index] = assistant_continue_message
-
-     # name is not a valid field on messages
-     for message in processed_messages:
-         if "name" in message:
-             message.pop("name", None)
-
-     # Note: When using reflection_with_llm we may end up with an "assistant" message as the last message and that may cause a blank response
-     # So, if the last role is not user, add a 'user' continue message at the end
-     if processed_messages[-1]["role"] != "user":
-         processed_messages.append(user_continue_message)
-
-     return processed_messages
-
-
- def _calculate_cost(input_tokens: int, output_tokens: int, model: str) -> float:
-     """Calculate the cost of the completion using the Anthropic pricing."""
-     total = 0.0
-
-     if model in ANTHROPIC_PRICING_1k:
-         input_cost_per_1k, output_cost_per_1k = ANTHROPIC_PRICING_1k[model]
-         input_cost = (input_tokens / 1000) * input_cost_per_1k
-         output_cost = (output_tokens / 1000) * output_cost_per_1k
-         total = input_cost + output_cost
-     else:
-         warnings.warn(f"Cost calculation not available for model {model}", UserWarning)
-
-     return total
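
For reference, the pricing arithmetic in the deleted `_calculate_cost` helper scales each token count down to thousands and multiplies by the per-1k rates in `ANTHROPIC_PRICING_1k`. Below is a minimal standalone sketch of that calculation; it is not part of the package diff, and the token counts are hypothetical example values.

    # Per-1k-token cost arithmetic, mirroring the deleted _calculate_cost helper.
    # Rates copied from ANTHROPIC_PRICING_1k for claude-3-sonnet-20240229;
    # the token counts are hypothetical example values.
    input_cost_per_1k, output_cost_per_1k = 0.003, 0.015
    input_tokens, output_tokens = 1200, 800
    cost = (input_tokens / 1000) * input_cost_per_1k + (output_tokens / 1000) * output_cost_per_1k
    print(f"${cost:.4f}")  # 1.2 * 0.003 + 0.8 * 0.015 = 0.0036 + 0.0120 = $0.0156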