gwenflow-0.4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. gwenflow-0.4.2/LICENSE +21 -0
  2. gwenflow-0.4.2/PKG-INFO +28 -0
  3. gwenflow-0.4.2/README.md +218 -0
  4. gwenflow-0.4.2/gwenflow/__init__.py +31 -0
  5. gwenflow-0.4.2/gwenflow/agents/__init__.py +5 -0
  6. gwenflow-0.4.2/gwenflow/agents/agent.py +398 -0
  7. gwenflow-0.4.2/gwenflow/agents/agent_code_executor.py +32 -0
  8. gwenflow-0.4.2/gwenflow/agents/run.py +14 -0
  9. gwenflow-0.4.2/gwenflow/agents/utils.py +17 -0
  10. gwenflow-0.4.2/gwenflow/embeddings/__init__.py +7 -0
  11. gwenflow-0.4.2/gwenflow/embeddings/base.py +18 -0
  12. gwenflow-0.4.2/gwenflow/embeddings/gwenlake.py +111 -0
  13. gwenflow-0.4.2/gwenflow/flows/__init__.py +7 -0
  14. gwenflow-0.4.2/gwenflow/flows/autoflow.py +105 -0
  15. gwenflow-0.4.2/gwenflow/flows/base.py +91 -0
  16. gwenflow-0.4.2/gwenflow/llms/__init__.py +11 -0
  17. gwenflow-0.4.2/gwenflow/llms/anthropic.py +111 -0
  18. gwenflow-0.4.2/gwenflow/llms/azure_openai.py +68 -0
  19. gwenflow-0.4.2/gwenflow/llms/base.py +123 -0
  20. gwenflow-0.4.2/gwenflow/llms/gwenlake.py +59 -0
  21. gwenflow-0.4.2/gwenflow/llms/mistralai.py +146 -0
  22. gwenflow-0.4.2/gwenflow/llms/ollama.py +50 -0
  23. gwenflow-0.4.2/gwenflow/llms/openai.py +169 -0
  24. gwenflow-0.4.2/gwenflow/memory/__init__.py +5 -0
  25. gwenflow-0.4.2/gwenflow/memory/base.py +39 -0
  26. gwenflow-0.4.2/gwenflow/memory/chat_memory_buffer.py +63 -0
  27. gwenflow-0.4.2/gwenflow/prompts/__init__.py +7 -0
  28. gwenflow-0.4.2/gwenflow/prompts/pipeline.py +41 -0
  29. gwenflow-0.4.2/gwenflow/prompts/template.py +97 -0
  30. gwenflow-0.4.2/gwenflow/readers/__init__.py +13 -0
  31. gwenflow-0.4.2/gwenflow/readers/base.py +56 -0
  32. gwenflow-0.4.2/gwenflow/readers/directory.py +159 -0
  33. gwenflow-0.4.2/gwenflow/readers/json.py +38 -0
  34. gwenflow-0.4.2/gwenflow/readers/pdf.py +41 -0
  35. gwenflow-0.4.2/gwenflow/readers/text.py +32 -0
  36. gwenflow-0.4.2/gwenflow/readers/website.py +128 -0
  37. gwenflow-0.4.2/gwenflow/reranker/__init__.py +7 -0
  38. gwenflow-0.4.2/gwenflow/reranker/base.py +19 -0
  39. gwenflow-0.4.2/gwenflow/reranker/gwenlake.py +103 -0
  40. gwenflow-0.4.2/gwenflow/stores/__init__.py +0 -0
  41. gwenflow-0.4.2/gwenflow/stores/opensearch.py +135 -0
  42. gwenflow-0.4.2/gwenflow/tasks/__init__.py +5 -0
  43. gwenflow-0.4.2/gwenflow/tasks/prompts.py +6 -0
  44. gwenflow-0.4.2/gwenflow/tasks/task.py +58 -0
  45. gwenflow-0.4.2/gwenflow/tools/__init__.py +16 -0
  46. gwenflow-0.4.2/gwenflow/tools/base.py +87 -0
  47. gwenflow-0.4.2/gwenflow/tools/duckduckgo.py +62 -0
  48. gwenflow-0.4.2/gwenflow/tools/pdf.py +16 -0
  49. gwenflow-0.4.2/gwenflow/tools/utils.py +66 -0
  50. gwenflow-0.4.2/gwenflow/tools/website.py +16 -0
  51. gwenflow-0.4.2/gwenflow/tools/wikipedia.py +65 -0
  52. gwenflow-0.4.2/gwenflow/types/__init__.py +17 -0
  53. gwenflow-0.4.2/gwenflow/types/chat_completion.py +140 -0
  54. gwenflow-0.4.2/gwenflow/types/chat_completion_chunk.py +139 -0
  55. gwenflow-0.4.2/gwenflow/types/chat_message.py +46 -0
  56. gwenflow-0.4.2/gwenflow/types/document.py +34 -0
  57. gwenflow-0.4.2/gwenflow/utils/__init__.py +21 -0
  58. gwenflow-0.4.2/gwenflow/utils/aws.py +54 -0
  59. gwenflow-0.4.2/gwenflow/utils/bytes.py +7 -0
  60. gwenflow-0.4.2/gwenflow/utils/json.py +51 -0
  61. gwenflow-0.4.2/gwenflow/utils/logger.py +38 -0
  62. gwenflow-0.4.2/gwenflow/utils/tokens.py +53 -0
  63. gwenflow-0.4.2/gwenflow/vector_stores/__init__.py +0 -0
  64. gwenflow-0.4.2/gwenflow/vector_stores/base.py +51 -0
  65. gwenflow-0.4.2/gwenflow/vector_stores/lancedb.py +156 -0
  66. gwenflow-0.4.2/gwenflow/vector_stores/qdrant.py +270 -0
  67. gwenflow-0.4.2/gwenflow.egg-info/PKG-INFO +28 -0
  68. gwenflow-0.4.2/gwenflow.egg-info/SOURCES.txt +71 -0
  69. gwenflow-0.4.2/gwenflow.egg-info/dependency_links.txt +1 -0
  70. gwenflow-0.4.2/gwenflow.egg-info/requires.txt +13 -0
  71. gwenflow-0.4.2/gwenflow.egg-info/top_level.txt +1 -0
  72. gwenflow-0.4.2/setup.cfg +4 -0
  73. gwenflow-0.4.2/setup.py +13 -0
gwenflow-0.4.2/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 Gwenlake
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
gwenflow-0.4.2/PKG-INFO ADDED
@@ -0,0 +1,28 @@
+ Metadata-Version: 2.2
+ Name: gwenflow
+ Version: 0.4.2
+ Summary: A framework for orchestrating applications powered by autonomous AI agents and LLMs.
+ Home-page: https://github.com/gwenlake/gwenflow
+ Author: The Gwenlake Team
+ Author-email: info@gwenlake.com
+ Requires-Python: >=3.11
+ License-File: LICENSE
+ Requires-Dist: httpx
+ Requires-Dist: pydantic
+ Requires-Dist: tqdm
+ Requires-Dist: pyyaml
+ Requires-Dist: beautifulsoup4
+ Requires-Dist: fsspec
+ Requires-Dist: rich
+ Requires-Dist: tiktoken
+ Requires-Dist: openai
+ Requires-Dist: langchain
+ Requires-Dist: pyarrow
+ Requires-Dist: lancedb
+ Requires-Dist: qdrant-client
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: home-page
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
gwenflow-0.4.2/README.md ADDED
@@ -0,0 +1,218 @@
+ <div align="center">
+
+ ![Logo of Gwenflow](./docs/images/gwenflow.png)
+
+ **A framework for orchestrating applications powered by autonomous AI agents and LLMs.**
+
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
+ [![GitHub release](https://img.shields.io/github/v/release/gwenlake/gwenflow)](https://github.com/gwenlake/gwenflow/releases)
+
+ </div>
+
+ ## Why Gwenflow?
+
+ Gwenflow, a framework designed by [Gwenlake](https://gwenlake.com),
+ streamlines the creation of customized, production-ready applications built around Agents and
+ Large Language Models (LLMs). It provides developers with the tools necessary
+ to integrate LLMs and Agents, enabling efficient and
+ scalable solutions tailored to specific business or user needs.
+
+ ## Installation
+
+ Install from the main branch to try the newest features:
+
+ ```bash
+ pip install -U git+https://github.com/gwenlake/gwenflow.git@main
+ ```
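
Since this diff documents a build published to a public registry, the pinned release can presumably also be installed directly; a minimal sketch, assuming the distribution is published under the same name as the package:

```bash
pip install -U gwenflow
```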
+
+ ## Usage
+
+ Load your OpenAI API key from an environment variable:
+
+ ```python
+ import os
+ from gwenflow import ChatOpenAI
+
+ llm = ChatOpenAI(
+     api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
+ )
+ ```
+
+ or load your API key from a local .env file:
+
+ ```python
+ import dotenv
+ from gwenflow import ChatOpenAI
+
+ dotenv.load_dotenv(override=True)  # load your API key from .env
+
+ llm = ChatOpenAI()
+ ```
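
The top-level package also exports `ChatGwenlake`, `ChatAzureOpenAI` and `ChatOllama` (see gwenflow/__init__.py below), so other backends can be swapped in the same way. A hedged sketch — the model name and constructor arguments here are illustrative assumptions, since this diff confirms the class names but not their parameters:

```python
from gwenflow import ChatOllama

# Illustrative only: the constructor signature is assumed to mirror
# ChatOpenAI's; the diff confirms the class name, not its parameters.
llm = ChatOllama(model="llama3")
```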
+
+ ## Chat
+
+ ```python
+ import dotenv
+ from gwenflow import ChatOpenAI
+
+ dotenv.load_dotenv(override=True)  # load your API key from .env
+
+ messages = [
+     {
+         "role": "user",
+         "content": "Describe Argentina in one sentence."
+     }
+ ]
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+ print(llm.invoke(messages=messages))
+ ```
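
Streaming appears to go through the same client: `Agent.invoke` in gwenflow/agents/agent.py (later in this diff) calls `llm.stream(...)` and iterates OpenAI-style chunks. A minimal sketch under that assumption:

```python
# Sketch only: assumes ChatOpenAI.stream yields OpenAI-style chunks with
# chunk.choices[0].delta, as the Agent code later in this diff relies on.
for chunk in llm.stream(messages=messages):
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```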
+
+ ## Agent
+
+ ```python
+ import dotenv
+ from gwenflow import Agent
+
+ dotenv.load_dotenv(override=True)  # load your OpenAI API key from .env
+
+ # the Agent automatically uses gpt-4o-mini when no llm is given
+ agent = Agent(
+     name="Agent",
+     description="You are a helpful agent.",
+ )
+
+ response = agent.run("how are you?")
+ print(response.content)
+ ```
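
`run()` also accepts an `output_file` parameter (see gwenflow/agents/agent.py later in this diff), which appends the response to a file as a small markdown transcript:

```python
# Appends a "# Agent: <name>" markdown block plus the response content
# to the file; the filename here is illustrative.
response = agent.run("how are you?", output_file="transcript.md")
print(response.content)
```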
+
+ ## Agents, Tasks and Tools
+
+ ```python
+ import requests
+ import json
+ import dotenv
+
+ from gwenflow import ChatOpenAI, Agent, Task, Tool
+
+ dotenv.load_dotenv(override=True)  # load your API key from .env
+
+ # tool to get exchange rates
+ def get_exchange_rate(currency_iso: str) -> str:
+     """Get the current exchange rate for a given currency. Currency MUST be in ISO format."""
+     try:
+         response = requests.get("http://www.floatrates.com/daily/usd.json").json()
+         data = response[currency_iso.lower()]
+         return json.dumps(data)
+     except Exception:
+         print(f"Currency not found: {currency_iso}")
+         return "Currency not found"
+
+ tool_get_exchange_rate = Tool.from_function(get_exchange_rate)
+
+ # llm, agent and task
+ llm = ChatOpenAI(model="gpt-4o-mini")
+
+ agentfx = Agent(
+     name="Fx Analyst",
+     instructions="Get recent exchange rates data.",
+     llm=llm,
+     tools=[tool_get_exchange_rate],
+ )
+
+ # loop over a list of tasks
+ queries = [
+     "Find the capital city of France?",
+     "What's the exchange rate of the Brazilian real?",
+     "What's the exchange rate of the Euro?",
+     "What's the exchange rate of the Chine Renminbi?",
+     "What's the exchange rate of the Chinese Yuan?",
+     "What's the exchange rate of the Tonga?"
+ ]
+
+ for query in queries:
+     task = Task(
+         description=query,
+         expected_output="Answer in one sentence and if there is a date, mention this date.",
+         agent=agentfx
+     )
+     print("")
+     print("Q:", query)
+     print("A:", task.run())
+ ```
+
+ ```
+ Q: Find the capital city of France?
+ A: The capital city of France is Paris.
+
+ Q: What's the exchange rate of the Brazilian real?
+ A: The exchange rate of the Brazilian real (BRL) is approximately 5.76, as of November 12, 2024.
+
+ Q: What's the exchange rate of the Euro?
+ A: The exchange rate of the Euro (EUR) is 0.9409 as of November 12, 2024.
+
+ Q: What's the exchange rate of the Chine Renminbi?
+ A: The exchange rate of the Chinese Renminbi (CNY) is 7.23 as of November 12, 2024.
+
+ Q: What's the exchange rate of the Chinese Yuan?
+ A: The exchange rate of the Chinese Yuan (CNY) is 7.23 as of November 12, 2024.
+
+ Q: What's the exchange rate of the Tonga?
+ A: The current exchange rate for the Tongan paʻanga (TOP) is 2.3662, as of November 12, 2024.
+ ```
+
+ ## Automated Flows and Agents with LangChain Tools
+
+ Run an agent with LangChain tools. This requires `langchain`, `langchain-experimental`
+ and `langchain-community`:
+
+ ```
+ pip install langchain
+ pip install langchain-experimental
+ pip install langchain-community
+ ```
+
+ > [!CAUTION]
+ > Python REPL can execute arbitrary code on the host machine (e.g., delete files, make network requests). Use with caution.
+ > For general security guidelines, please see https://python.langchain.com/docs/security/.
+
+ ```python
+ import dotenv
+ from gwenflow import ChatOpenAI, Tool, AutoFlow
+
+ import langchain.agents
+ from langchain_experimental.utilities import PythonREPL
+ from langchain_community.tools import WikipediaQueryRun
+ from langchain_community.utilities import WikipediaAPIWrapper
+
+ # Load API key from .env file
+ dotenv.load_dotenv(override=True)
+
+ python_repl = PythonREPL()
+ python_repl_tool = langchain.agents.Tool(
+     name="python_repl",
+     description="This tool can execute Python code and shell commands (e.g. pip commands for module installation). Use with caution.",
+     func=python_repl.run,
+ )
+
+ api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=5000)
+ wikipedia = WikipediaQueryRun(api_wrapper=api_wrapper)
+
+ tool_python = Tool.from_langchain(python_repl_tool)
+ tool_wikipedia = Tool.from_langchain(wikipedia)
+
+ llm = ChatOpenAI(model="gpt-4o")
+
+ flow = AutoFlow(llm=llm, tools=[tool_python, tool_wikipedia])
+ flow.generate_tasks(objective="Tell me about the biography of Kamala Harris and produce a pptx named biography_auto.pptx")
+ flow.run()
+ ```
+
+ ## Contributing to Gwenflow
+
+ We are very open to the community's contributions - be it a quick fix of a typo, or a completely new feature! You don't need to be a Gwenflow expert to provide meaningful improvements.
gwenflow-0.4.2/gwenflow/__init__.py ADDED
@@ -0,0 +1,31 @@
+ import importlib.metadata
+
+ try:
+     __version__ = importlib.metadata.version(__package__)
+ except importlib.metadata.PackageNotFoundError:
+     # Case where package metadata is not available.
+     __version__ = ""
+
+
+ from gwenflow.llms import ChatGwenlake, ChatOpenAI, ChatAzureOpenAI, ChatOllama
+ from gwenflow.readers import SimpleDirectoryReader
+ from gwenflow.agents import Agent
+ from gwenflow.tasks import Task
+ from gwenflow.tools import Tool
+ from gwenflow.flows import Flow, AutoFlow
+ from gwenflow.types import Document
+
+
+ __all__ = [
+     "ChatGwenlake",
+     "ChatOpenAI",
+     "ChatAzureOpenAI",
+     "ChatOllama",
+     "Document",
+     "SimpleDirectoryReader",
+     "Agent",
+     "Task",
+     "Tool",
+     "Flow",
+     "AutoFlow",
+ ]
gwenflow-0.4.2/gwenflow/agents/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from gwenflow.agents.agent import Agent
+
+ __all__ = [
+     "Agent",
+ ]
gwenflow-0.4.2/gwenflow/agents/agent.py ADDED
@@ -0,0 +1,398 @@
+
+ import uuid
+ import json
+ from typing import List, Callable, Union, Optional, Any, Dict, Iterator, Literal, Sequence, overload, Type
+ from collections import defaultdict
+ from pydantic import BaseModel, model_validator, field_validator, Field
+ from datetime import datetime
+
+ from gwenflow.llms import ChatOpenAI
+ from gwenflow.types import ChatCompletionMessage, ChatCompletionMessageToolCall
+ from gwenflow.tools import BaseTool
+ from gwenflow.memory import ChatMemoryBuffer
+ from gwenflow.agents.run import RunResponse
+ from gwenflow.agents.utils import merge_chunk
+ from gwenflow.utils import logger
+
+
+ MAX_TURNS = 10
+
+
+ class Result(BaseModel):
+     """Encapsulates the possible return values for an agent function."""
+     value: str = ""
+     agent: Optional[Any] = None
+     context_variables: dict = {}
+
+
+ class Agent(BaseModel):
+
+     # --- Agent Settings
+     id: Optional[str] = Field(None, validate_default=True)
+     name: str
+
+     # --- Settings for system message
+     description: Optional[str] = "You are a helpful AI assistant."
+     task: Optional[str] = None
+     instructions: Optional[Union[str, List[str]]] = []
+     add_datetime_to_instructions: bool = True
+     markdown: bool = False
+     scrape_links: bool = True
+
+     response_model: Optional[str] = None
+
+     # --- Agent Model and Tools
+     llm: Optional[Any] = Field(None, validate_default=True)
+     tools: List[BaseTool] = []
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None
+
+     # --- Context and Memory
+     context_vars: Optional[List[str]] = []
+     memory: Optional[ChatMemoryBuffer] = None
+     keep_history: bool = False
+     metadata: Optional[Dict[str, Any]] = None
+
+     # --- Team of agents
+     team: Optional[List["Agent"]] = None
+
+
+     @field_validator("id", mode="before")
+     def set_id(cls, v: Optional[str]) -> str:
+         id = v or str(uuid.uuid4())
+         return id
+
+     @field_validator("instructions", mode="before")
+     def set_instructions(cls, v: Optional[Union[List, str]]) -> List[str]:
+         if isinstance(v, str):
+             instructions = [v]
+             return instructions
+         return v
+
+     @field_validator("llm", mode="before")
+     def set_llm(cls, v: Optional[Any]) -> Any:
+         llm = v or ChatOpenAI(model="gpt-4o-mini")
+         return llm
+
+     @model_validator(mode="after")
+     def model_valid(self) -> Any:
+         if self.memory is None and self.llm is not None:
+             token_limit = self.llm.get_context_window_size()
+             self.memory = ChatMemoryBuffer(token_limit=token_limit)
+         return self
+
+     def get_system_message(self, context: Optional[Any] = None):
+         """Return the system message for the Agent."""
+
+         system_message_lines = []
+
+         if self.description is not None:
+             system_message_lines.append(f"{self.description}\n")
+
+         if self.name is not None:
+             system_message_lines.append(f"Your name is: {self.name}.\n")
+
+         if self.task is not None:
+             system_message_lines.append(f"Your task is: {self.task}\n")
+
+         # instructions
+         instructions = self.instructions
+
+         if self.add_datetime_to_instructions:
+             instructions.append(f"The current time is { datetime.now() }")
+
+         if self.markdown and self.response_model is None:
+             instructions.append("Use markdown to format your answers.")
+
+         if self.scrape_links:
+             instructions.append("If you get a list of web links, systematically scrape the content of all the linked websites to extract detailed information about the topic.")
+
+         if self.response_model:
+             instructions.append("Use JSON to format your answers.")
+
+         if context is not None:
+             instructions.append("Always prefer information from the provided context over your own knowledge.")
+
+         if len(instructions) > 0:
+             system_message_lines.append("# Instructions")
+             system_message_lines.extend([f"- {instruction}" for instruction in instructions])
+             system_message_lines.append("")
+
+         if self.response_model:
+             system_message_lines.append("# Provide your output using the following JSON schema:")
+             if isinstance(self.response_model, str):
+                 system_message_lines.append("<json_fields>")
+                 system_message_lines.append(f"{ self.response_model.strip() }")
+                 system_message_lines.append("</json_fields>\n\n")
+
+         # final system prompt
+         if len(system_message_lines) > 0:
+             return dict(role="system", content=("\n".join(system_message_lines)).strip())
+
+         return None
+
+     def get_user_message(self, user_prompt: Optional[str] = None, context: Optional[Any] = None):
+         """Return the user message for the Agent."""
+
+         prompt = ""
+
+         if context is not None:
+
+             prompt += "\n\nUse the following information from the knowledge base if it helps:\n"
+             prompt += "<context>\n"
+
+             if isinstance(context, str):
+                 prompt += context + "\n"
+
+             elif isinstance(context, dict):
+                 for key in context.keys():
+                     prompt += f"<{key}>\n"
+                     prompt += context.get(key) + "\n"
+                     prompt += f"</{key}>\n\n"
+
+             prompt += "</context>\n\n"
+
+         if user_prompt:
+             if isinstance(user_prompt, str):
+                 prompt += user_prompt
+             elif isinstance(user_prompt, dict):
+                 prompt += user_prompt["content"]
+
+         return { "role": "user", "content": prompt }
+
+
+     def get_tools_openai_schema(self, tools: List[BaseTool]):
+         return [tool.openai_schema for tool in tools]
+
+     def get_tools_map(self, tools: List[BaseTool]):
+         return {tool.name: tool for tool in tools}
+
+     def handle_function_result(self, result) -> Result:
+         match result:
+             case Result() as result:
+                 return result
+
+             case Agent() as agent:
+                 return Result(
+                     value=json.dumps({"assistant": self.name}),
+                     agent=agent,
+                 )
+             case _:
+                 try:
+                     return Result(value=str(result))
+                 except Exception as e:
+                     error_message = f"Failed to cast response to string: {result}. Make sure agent functions return a string or Result object. Error: {str(e)}"
+                     logger.error(error_message)
+                     raise TypeError(error_message)
+
+     def handle_tool_calls(
+         self,
+         tool_calls: List[ChatCompletionMessageToolCall],
+         tools: List[BaseTool],
+     ) -> RunResponse:
+
+         tool_map = self.get_tools_map(self.tools)
+
+         partial_response = RunResponse(messages=[], agent=None)
+
+         for tool_call in tool_calls:
+
+             name = tool_call.function.name
+
+             # handle missing tool case, skip to next tool
+             if name not in tool_map:
+                 logger.debug(f"Tool {name} not found in function map.")
+                 partial_response.messages.append(
+                     {
+                         "role": "tool",
+                         "tool_call_id": tool_call.id,
+                         "tool_name": name,
+                         "content": f"Error: Tool {name} not found.",
+                     }
+                 )
+                 continue
+
+             args = json.loads(tool_call.function.arguments)
+             logger.debug(f"Tool call: {name} with arguments {args}")
+
+             tool_result = tool_map[name].run(**args)
+
+             result: Result = self.handle_function_result(tool_result)
+
+             partial_response.messages.append(
+                 {
+                     "role": "tool",
+                     "tool_call_id": tool_call.id,
+                     "tool_name": name,
+                     "content": result.value,
+                 }
+             )
+
+             if result.agent:
+                 partial_response.agent = result.agent
+
+         return partial_response
+
+     def invoke(self, messages: list, stream: bool = False) -> Union[Any, Iterator[Any]]:
+
+         tools = self.get_tools_openai_schema(self.tools)
+
+         params = {
+             "messages": messages,
+             "tools": tools or None,
+             "tool_choice": self.tool_choice,
+             "parse_response": False,
+         }
+
+         response_format = None
+         if self.response_model:
+             response_format = {"type": "json_object"}
+
+         if stream:
+             return self.llm.stream(**params, response_format=response_format)
+
+         return self.llm.invoke(**params, response_format=response_format)
+
+
+     def _run(
+         self,
+         user_prompt: Optional[str] = None,
+         *,
+         context: Optional[Any] = None,
+         stream: Optional[bool] = False,
+         **kwargs: Any,
+     ) -> Iterator[RunResponse]:
+
+         # prepare messages
+         messages_for_model = []
+         system_message = self.get_system_message(context=context)
+         if system_message:
+             messages_for_model.append(system_message)
+
+         if self.keep_history:
+             if len(self.memory.get()) > 0:
+                 messages_for_model.extend(self.memory.get())
+
+         user_message = self.get_user_message(user_prompt, context=context)
+         if user_message:
+             messages_for_model.append(user_message)
+             if self.memory and self.keep_history:
+                 self.memory.add_message(user_message)
+
+         # global loop
+         init_len = len(messages_for_model)
+         while len(messages_for_model) - init_len < MAX_TURNS:
+
+             if stream:
+                 message = {
+                     "content": "",
+                     "sender": self.name,
+                     "role": "assistant",
+                     "function_call": None,
+                     "tool_calls": defaultdict(
+                         lambda: {
+                             "function": {"arguments": "", "name": ""},
+                             "id": "",
+                             "type": "",
+                         }
+                     ),
+                 }
+
+                 completion = self.invoke(messages=messages_for_model, stream=True)
+
+                 for chunk in completion:
+                     if len(chunk.choices) > 0:
+                         delta = json.loads(chunk.choices[0].delta.json())
+                         if delta["role"] == "assistant":
+                             delta["sender"] = self.name
+                         if delta["content"]:
+                             yield delta["content"]
+                         delta.pop("role", None)
+                         delta.pop("sender", None)
+                         merge_chunk(message, delta)
+
+                 message["tool_calls"] = list(message.get("tool_calls", {}).values())
+                 message = ChatCompletionMessage(**message)
+
+             else:
+                 completion = self.invoke(messages=messages_for_model)
+                 message = completion.choices[0].message
+                 message.sender = self.name
+
+             # add messages to the current message stack
+             message_dict = json.loads(message.model_dump_json())
+             messages_for_model.append(message_dict)
+
+             if not message.tool_calls:
+                 self.memory.add_message(message_dict)
+                 break
+
+             # handle tool calls and switching agents
+             partial_response = self.handle_tool_calls(message.tool_calls, self.tools)
+             messages_for_model.extend(partial_response.messages)
+             if partial_response.agent:
+                 return partial_response.agent
+
+         content = messages_for_model[-1]["content"]
+         if self.response_model:
+             content = json.loads(content)
+
+         yield RunResponse(
+             content=content,
+             messages=messages_for_model[init_len:],
+             agent=self,
+             tools=self.tools,
+         )
+
+
+     def run(
+         self,
+         user_prompt: Optional[str] = None,
+         *,
+         context: Optional[Any] = None,
+         stream: Optional[bool] = False,
+         output_file: Optional[str] = None,
+         **kwargs: Any,
+     ) -> Union[RunResponse, Iterator[RunResponse]]:
+
+         agent_id = self.name or self.id
+
+         logger.debug("")
+         logger.debug("------------------------------------------")
+         logger.debug(f"Running Agent: { agent_id }")
+         logger.debug("------------------------------------------")
+         logger.debug("")
+
+         if stream:
+             response = self._run(
+                 user_prompt=user_prompt,
+                 context=context,
+                 stream=True,
+                 **kwargs,
+             )
+             return response
+
+         else:
+
+             response = self._run(
+                 user_prompt=user_prompt,
+                 context=context,
+                 stream=False,
+                 **kwargs,
+             )
+             response = next(response)
+
+             if output_file:
+                 with open(output_file, "a") as file:
+
+                     name = self.name or self.id
+
+                     file.write("\n")
+                     file.write("---\n\n")
+                     file.write(f"# Agent: { name }\n")
+                     if self.task:
+                         file.write(f"{ self.task }\n")
+                     file.write("\n")
+                     file.write(response.content)
+
+             return response
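
As the code above shows, `response_model` is a plain string of JSON fields: `get_system_message` injects it between `<json_fields>` tags and `_run` parses the final answer with `json.loads`, so `run(...).content` comes back as a dict. A minimal sketch, with an illustrative schema string:

```python
from gwenflow import Agent

# The schema string is illustrative; the Agent only requires a string,
# which it pastes into the system prompt between <json_fields> tags.
agent = Agent(
    name="Extractor",
    description="You extract structured facts.",
    response_model='{"country": "string", "capital": "string"}',
)

response = agent.run("Give me France and its capital.")
print(response.content["capital"])  # parsed via json.loads in Agent._run
```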