foundationallm-agent-plugins-langchain 0.9.7.post3 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (15)
  1. foundationallm_agent_plugins_langchain-0.9.7.post3/PKG-INFO +26 -0
  2. foundationallm_agent_plugins_langchain-0.9.7.post3/README.md +13 -0
  3. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/__init__.py +1 -0
  4. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/_metadata/foundationallm_manifest.json +7 -0
  5. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflow_plugin_manager.py +88 -0
  6. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/__init__.py +3 -0
  7. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_agent_workflow.py +237 -0
  8. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_lcel_workflow.py +285 -0
  9. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langgraph_react_agent_workflow.py +225 -0
  10. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/PKG-INFO +26 -0
  11. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/SOURCES.txt +13 -0
  12. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/dependency_links.txt +1 -0
  13. foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/top_level.txt +2 -0
  14. foundationallm_agent_plugins_langchain-0.9.7.post3/pyproject.toml +24 -0
  15. foundationallm_agent_plugins_langchain-0.9.7.post3/setup.cfg +4 -0
foundationallm_agent_plugins_langchain-0.9.7.post3/PKG-INFO
@@ -0,0 +1,26 @@
+ Metadata-Version: 2.4
+ Name: foundationallm-agent-plugins-langchain
+ Version: 0.9.7.post3
+ Summary: FoundationaLLM Agent plugins.
+ Author-email: FoundationaLLM <dev@foundationallm.ai>
+ License-Expression: MIT
+ Project-URL: Homepage, https://foundationallm.ai
+ Project-URL: Issues, https://github.com/foundationallm/foundationallm/issues
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+
+ # FoundationaLLM Agent Plugins LangChain
+
+ This package provides the FoundationaLLM agent plugins built on LangChain.
+
+ To build and publish the package, run the following commands (in the folder where `pyproject.toml` is located):
+
+ ```cmd
+ python -m pip install --upgrade build
+ python -m pip install --upgrade twine
+
+ python -m build
+ python -m twine upload dist/*
+ ```
foundationallm_agent_plugins_langchain-0.9.7.post3/README.md
@@ -0,0 +1,13 @@
+ # FoundationaLLM Agent Plugins LangChain
+
+ This package provides the FoundationaLLM agent plugins built on LangChain.
+
+ To build and publish the package, run the following commands (in the folder where `pyproject.toml` is located):
+
+ ```cmd
+ python -m pip install --upgrade build
+ python -m pip install --upgrade twine
+
+ python -m build
+ python -m twine upload dist/*
+ ```
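For completeness: consumers of the published artifact would not run the build steps above. Assuming the package is available on PyPI under the name declared in `pyproject.toml`, installing it is a single command:

```cmd
python -m pip install foundationallm-agent-plugins-langchain
```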
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/__init__.py
@@ -0,0 +1 @@
+ from .workflow_plugin_manager import FoundationaLLMAgentWorkflowLangChainPluginManager
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/_metadata/foundationallm_manifest.json
@@ -0,0 +1,7 @@
+ {
+     "display_name": "FoundationaLLM Agent Plugins LangChain (Python)",
+     "description": "The FoundationaLLM Agent plugins (built on LangChain) package for Python.",
+     "plugin_managers": [
+         "FoundationaLLMAgentWorkflowLangChainPluginManager"
+     ]
+ }
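The manifest is what ties the package to its entry point: a host enumerates the class names listed under `plugin_managers` and resolves them against the package's top-level exports. A minimal sketch of that discovery step, assuming Python 3.11+ and an installed copy of the package (the loader below is illustrative; FoundationaLLM's actual plugin-loading code is not part of this package):

```python
import importlib
import importlib.resources
import json

# Read the bundled manifest from the installed package (illustrative loader;
# the real FoundationaLLM host has its own plugin discovery mechanism).
manifest_path = (
    importlib.resources.files("foundationallm_agent_plugins_langchain")
    / "_metadata" / "foundationallm_manifest.json"
)
manifest = json.loads(manifest_path.read_text())

# Resolve each declared plugin manager against the package's top-level exports.
module = importlib.import_module("foundationallm_agent_plugins_langchain")
for class_name in manifest["plugin_managers"]:
    plugin_manager_class = getattr(module, class_name)
    print(f"Discovered plugin manager: {plugin_manager_class.__name__}")
```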
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflow_plugin_manager.py
@@ -0,0 +1,88 @@
+ from typing import List
+
+ from foundationallm.config import Configuration, UserIdentity
+ from foundationallm.models.agents import (
+     AgentTool,
+     GenericAgentWorkflow,
+     AgentWorkflowBase
+ )
+ from foundationallm.langchain.common import FoundationaLLMWorkflowBase
+ from foundationallm.operations import OperationsManager
+ from foundationallm.plugins import WorkflowPluginManagerBase
+ from foundationallm_agent_plugins_langchain.workflows import (
+     FoundationaLLMLangChainAgentWorkflow,
+     FoundationaLLMLangChainLCELWorkflow,
+     FoundationaLLMLangGraphReActAgentWorkflow,
+ )
+
+ class FoundationaLLMAgentWorkflowLangChainPluginManager(WorkflowPluginManagerBase):
+
+     FOUNDATIONALLM_LANGCHAIN_AGENT_WORKFLOW_CLASS_NAME = 'FoundationaLLMLangChainAgentWorkflow'
+     FOUNDATIONALLM_LANGCHAIN_LCEL_WORKFLOW_CLASS_NAME = 'FoundationaLLMLangChainLCELWorkflow'
+     FOUNDATIONALLM_LANGGRAPH_REACT_WORKFLOW_CLASS_NAME = 'FoundationaLLMLangGraphReActAgentWorkflow'
+
+     def __init__(self):
+         super().__init__()
+
+     def create_workflow(self,
+                         workflow_config: GenericAgentWorkflow | AgentWorkflowBase,
+                         objects: dict,
+                         tools: List[AgentTool],
+                         operations_manager: OperationsManager,
+                         user_identity: UserIdentity,
+                         config: Configuration,
+                         intercept_http_calls: bool = False) -> FoundationaLLMWorkflowBase:
+         """
+         Creates a workflow instance based on the workflow configuration.
+
+         Parameters
+         ----------
+         workflow_config : GenericAgentWorkflow | AgentWorkflowBase
+             The workflow configuration.
+         objects : dict
+             The exploded objects assigned from the agent.
+         tools : List[AgentTool]
+             The tools assigned to the agent.
+         operations_manager : OperationsManager
+             The operations manager instance.
+         user_identity : UserIdentity
+             The user identity of the user initiating the request.
+         config : Configuration
+             The application configuration for FoundationaLLM.
+         intercept_http_calls : bool, optional
+             Whether to intercept HTTP calls made by the workflow, by default False.
+
+         Returns
+         -------
+         FoundationaLLMWorkflowBase
+             The workflow instance.
+         """
+         if workflow_config.class_name == FoundationaLLMAgentWorkflowLangChainPluginManager.FOUNDATIONALLM_LANGCHAIN_AGENT_WORKFLOW_CLASS_NAME:
+             return FoundationaLLMLangChainAgentWorkflow(
+                 workflow_config,
+                 objects,
+                 tools,
+                 operations_manager,
+                 user_identity,
+                 config,
+                 intercept_http_calls=intercept_http_calls)
+         if workflow_config.class_name == FoundationaLLMAgentWorkflowLangChainPluginManager.FOUNDATIONALLM_LANGCHAIN_LCEL_WORKFLOW_CLASS_NAME:
+             return FoundationaLLMLangChainLCELWorkflow(
+                 workflow_config,
+                 objects,
+                 tools,
+                 operations_manager,
+                 user_identity,
+                 config,
+                 intercept_http_calls=intercept_http_calls)
+         if workflow_config.class_name == FoundationaLLMAgentWorkflowLangChainPluginManager.FOUNDATIONALLM_LANGGRAPH_REACT_WORKFLOW_CLASS_NAME:
+             return FoundationaLLMLangGraphReActAgentWorkflow(
+                 workflow_config,
+                 objects,
+                 tools,
+                 operations_manager,
+                 user_identity,
+                 config,
+                 intercept_http_calls=intercept_http_calls)
+         raise ValueError(f'Unknown workflow class name: {workflow_config.class_name}')
+
+     def refresh_tools(self):
+         print('Refreshing tools...')
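The plugin manager is a plain factory: it dispatches on `workflow_config.class_name` and forwards its arguments unchanged to the selected workflow's constructor. A hedged sketch of how a host might wrap it (all six arguments are produced by the FoundationaLLM runtime and are treated here as opaque placeholders):

```python
from foundationallm_agent_plugins_langchain import (
    FoundationaLLMAgentWorkflowLangChainPluginManager,
)

def build_workflow(workflow_config, objects, tools, operations_manager, user_identity, config):
    """Create the workflow selected by workflow_config.class_name.

    All six arguments come from the FoundationaLLM runtime; this wrapper
    only illustrates the dispatch contract, not how they are constructed.
    """
    plugin_manager = FoundationaLLMAgentWorkflowLangChainPluginManager()
    # Dispatch happens on workflow_config.class_name; an unrecognized
    # class name raises ValueError.
    return plugin_manager.create_workflow(
        workflow_config,
        objects,
        tools,
        operations_manager,
        user_identity,
        config,
        intercept_http_calls=False,
    )
```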
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/__init__.py
@@ -0,0 +1,3 @@
+ from .foundationallm_langchain_agent_workflow import FoundationaLLMLangChainAgentWorkflow
+ from .foundationallm_langchain_lcel_workflow import FoundationaLLMLangChainLCELWorkflow
+ from .foundationallm_langgraph_react_agent_workflow import FoundationaLLMLangGraphReActAgentWorkflow
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_agent_workflow.py
@@ -0,0 +1,237 @@
+ """
+ Class: FoundationaLLMLangChainAgentWorkflow
+ Description: FoundationaLLM agent workflow based on the built-in LangChain ReAct Agent.
+ """
+
+ import time
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional
+ from opentelemetry.trace import SpanKind
+
+ from langchain_core.messages import (
+     AIMessage,
+     BaseMessage,
+     HumanMessage,
+     ToolMessage
+ )
+ from langchain.agents import create_agent
+
+ from foundationallm.langchain.common import (
+     FoundationaLLMWorkflowBase,
+     FoundationaLLMToolBase
+ )
+ from foundationallm.config import (
+     Configuration,
+     UserIdentity
+ )
+ from foundationallm.models.agents import (
+     GenericAgentWorkflow,
+     AgentWorkflowBase
+ )
+ from foundationallm.models.constants import (
+     AgentCapabilityCategories
+ )
+ from foundationallm.models.messages import MessageHistoryItem
+ from foundationallm.models.orchestration import (
+     CompletionRequestObjectKeys,
+     CompletionResponse,
+     ContentArtifact,
+     FileHistoryItem,
+     OpenAITextMessageContentItem
+ )
+ from foundationallm.operations import OperationsManager
+
+ @dataclass
+ class Context:
+     original_user_prompt: str
+     recursion_limit: Optional[int]
+
+ class FoundationaLLMLangChainAgentWorkflow(FoundationaLLMWorkflowBase):
+     """
+     FoundationaLLM workflow based on the LangChain built-in ReAct Agent.
+     """
+
+     def __init__(
+         self,
+         workflow_config: GenericAgentWorkflow | AgentWorkflowBase,
+         objects: Dict,
+         tools: List[FoundationaLLMToolBase],
+         operations_manager: OperationsManager,
+         user_identity: UserIdentity,
+         config: Configuration,
+         intercept_http_calls: bool = False
+     ):
+         """
+         Initializes the FoundationaLLMLangChainAgentWorkflow class with the workflow configuration.
+
+         Parameters
+         ----------
+         workflow_config : GenericAgentWorkflow | AgentWorkflowBase
+             The workflow assigned to the agent.
+         objects : dict
+             The exploded objects assigned from the agent.
+         tools : List[FoundationaLLMToolBase]
+             The tools assigned to the agent.
+         operations_manager : OperationsManager
+             The operations manager instance.
+         user_identity : UserIdentity
+             The user identity of the user initiating the request.
+         config : Configuration
+             The application configuration for FoundationaLLM.
+         intercept_http_calls : bool, optional
+             Whether to intercept HTTP calls made by the workflow, by default False.
+         """
+         super().__init__(workflow_config, objects, tools, operations_manager, user_identity, config)
+         self.name = workflow_config.name
+         self.default_error_message = workflow_config.properties.get(
+             'default_error_message',
+             'An error occurred while processing the request.') \
+             if workflow_config.properties else 'An error occurred while processing the request.'
+
+         # Sets self.workflow_llm
+         self.create_workflow_llm(intercept_http_calls=intercept_http_calls)
+         self.instance_id = objects.get(CompletionRequestObjectKeys.INSTANCE_ID, None)
+
+     async def invoke_async(
+         self,
+         operation_id: str,
+         user_prompt: str,
+         user_prompt_rewrite: Optional[str],
+         message_history: List[MessageHistoryItem],
+         file_history: List[FileHistoryItem],
+         conversation_id: Optional[str] = None,
+         objects: dict = None
+     ) -> CompletionResponse:
+         """
+         Invokes the workflow asynchronously.
+
+         Parameters
+         ----------
+         operation_id : str
+             The unique identifier of the FoundationaLLM operation.
+         user_prompt : str
+             The user prompt message.
+         user_prompt_rewrite : Optional[str]
+             The user prompt rewrite message containing additional context to clarify the user's intent.
+         message_history : List[MessageHistoryItem]
+             The message history.
+         file_history : List[FileHistoryItem]
+             The file history.
+         conversation_id : Optional[str]
+             The conversation identifier for the workflow execution.
+         objects : dict
+             The exploded objects assigned from the agent. This is used to pass additional context to the workflow.
+         """
+
+         workflow_start_time = time.time()
+
+         if objects is None:
+             objects = {}
+
+         content_artifacts: List[ContentArtifact] = []
+         input_tokens = 0
+         output_tokens = 0
+
+         llm_prompt = user_prompt_rewrite or user_prompt
+         workflow_main_prompt = self.create_workflow_main_prompt()
+
+         message_list = self.__get_message_list(
+             llm_prompt,
+             message_history,
+             objects
+         )
+
+         graph = create_agent(
+             model=self.workflow_llm,
+             tools=self.tools,
+             system_prompt=workflow_main_prompt,
+             context_schema=Context
+         )
+
+         # This is a port of the previous graph recursion limit handling.
+         # TODO: Clarify whether it still has an effect with the new LangGraph implementation.
+         graph_recursion_limit = self.workflow_config.properties.get('graph_recursion_limit', None) if self.workflow_config.properties else None
+
+         response = await graph.ainvoke(
+             { "messages": message_list },
+             context=Context(
+                 original_user_prompt=llm_prompt,
+                 recursion_limit=graph_recursion_limit))
+
+         # TODO: Process tool messages carrying analysis results: an AIMessage with
+         # content='' but with additional_kwargs={'tool_calls': [...]}.
+
+         # Get ContentArtifact items from ToolMessages.
+         tool_messages = [message for message in response["messages"] if isinstance(message, ToolMessage)]
+         for tool_message in tool_messages:
+             if tool_message.artifact is not None:
+                 # If the tool message artifact is a list, collect any ContentArtifact items it contains.
+                 if isinstance(tool_message.artifact, list):
+                     for item in tool_message.artifact:
+                         if isinstance(item, ContentArtifact):
+                             content_artifacts.append(item)
+
+         final_message = response["messages"][-1]
+         response_content = OpenAITextMessageContentItem(
+             value=final_message.content,
+             agent_capability_category=AgentCapabilityCategories.FOUNDATIONALLM_KNOWLEDGE_MANAGEMENT
+         )
+
+         workflow_end_time = time.time()
+         # usage_metadata can be None when the provider does not report token usage.
+         usage_metadata = final_message.usage_metadata or {}
+         output_tokens = usage_metadata.get("output_tokens", 0) or 0
+         input_tokens = usage_metadata.get("input_tokens", 0) or 0
+
+         workflow_content_artifact = self.create_workflow_execution_content_artifact(
+             llm_prompt,
+             input_tokens,
+             output_tokens,
+             workflow_end_time - workflow_start_time)
+         content_artifacts.append(workflow_content_artifact)
+
+         retvalue = CompletionResponse(
+             operation_id=operation_id,
+             content=[response_content],
+             content_artifacts=content_artifacts,
+             user_prompt=llm_prompt,
+             full_prompt=workflow_main_prompt,
+             completion_tokens=output_tokens,
+             prompt_tokens=input_tokens,
+             total_tokens=output_tokens + input_tokens,
+             total_cost=0
+         )
+         return retvalue
+
+     def __get_message_list(
+         self,
+         llm_prompt: str,
+         message_history: List[MessageHistoryItem],
+         objects: dict
+     ) -> List[BaseMessage]:
+         """
+         Returns the message history in the format required by the workflow.
+
+         Parameters
+         ----------
+         llm_prompt : str
+             The LLM prompt to be processed.
+         message_history : List[MessageHistoryItem]
+             The message history to be processed.
+         objects : dict
+             The exploded objects assigned from the agent. This is used to pass additional context to the workflow.
+         """
+
+         if objects is None:
+             objects = {}
+
+         # Convert message history to LangChain message types.
+         messages = []
+         for message in message_history:
+             # Convert each MessageHistoryItem to the appropriate LangChain message type.
+             if message.sender == "User":
+                 messages.append(HumanMessage(content=message.text))
+             else:
+                 messages.append(AIMessage(content=message.text))
+
+         return [
+             *messages,
+             HumanMessage(content=llm_prompt)
+         ]
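The `__get_message_list` helper reduces FoundationaLLM's `MessageHistoryItem` history to plain LangChain messages: `"User"` entries become `HumanMessage`, everything else becomes `AIMessage`, and the current prompt is appended last. The same conversion in isolation, using a hypothetical stand-in for `MessageHistoryItem` so the snippet is self-contained (only `langchain_core` is required):

```python
from dataclasses import dataclass
from typing import List

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage

@dataclass
class HistoryItem:
    """Hypothetical stand-in for FoundationaLLM's MessageHistoryItem."""
    sender: str
    text: str

def to_langchain_messages(history: List[HistoryItem], llm_prompt: str) -> List[BaseMessage]:
    # "User" entries map to HumanMessage; anything else (agent replies) to AIMessage.
    messages: List[BaseMessage] = [
        HumanMessage(content=m.text) if m.sender == "User" else AIMessage(content=m.text)
        for m in history
    ]
    # The current (possibly rewritten) prompt always goes last.
    messages.append(HumanMessage(content=llm_prompt))
    return messages

print(to_langchain_messages(
    [HistoryItem("User", "Hi"), HistoryItem("Agent", "Hello!")],
    "What can you do?"))
```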
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_lcel_workflow.py
@@ -0,0 +1,285 @@
+ """
+ Class: FoundationaLLMLangChainLCELWorkflow
+ Description: FoundationaLLM agent workflow based on LangChain LCEL.
+ """
+
+ import time
+ from typing import Dict, List, Optional
+ from opentelemetry.trace import SpanKind
+
+ from langchain_classic.callbacks import get_openai_callback
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompt_values import PromptValue
+ from langchain_core.prompts import PromptTemplate
+ from langchain_core.runnables import RunnablePassthrough, RunnableLambda
+
+ from foundationallm.config import (
+     Configuration,
+     UserIdentity
+ )
+ from foundationallm.langchain.common import (
+     FoundationaLLMWorkflowBase,
+     FoundationaLLMToolBase
+ )
+ from foundationallm.langchain.exceptions import LangChainException
+ from foundationallm.models.agents import (
+     GenericAgentWorkflow,
+     AgentWorkflowBase
+ )
+ from foundationallm.models.constants import (
+     AgentCapabilityCategories
+ )
+ from foundationallm.models.language_models import LanguageModelProvider
+ from foundationallm.models.messages import MessageHistoryItem
+ from foundationallm.models.orchestration import (
+     CompletionRequestObjectKeys,
+     CompletionResponse,
+     ContentArtifact,
+     FileHistoryItem,
+     OpenAITextMessageContentItem
+ )
+ from foundationallm.operations import OperationsManager
+
+
+ class FoundationaLLMLangChainLCELWorkflow(FoundationaLLMWorkflowBase):
+     """
+     FoundationaLLM workflow based on LangChain LCEL.
+     """
+
+     def __init__(
+         self,
+         workflow_config: GenericAgentWorkflow | AgentWorkflowBase,
+         objects: Dict,
+         tools: List[FoundationaLLMToolBase],
+         operations_manager: OperationsManager,
+         user_identity: UserIdentity,
+         config: Configuration,
+         intercept_http_calls: bool = False
+     ):
+         """
+         Initializes the FoundationaLLMLangChainLCELWorkflow class with the workflow configuration.
+
+         Parameters
+         ----------
+         workflow_config : GenericAgentWorkflow | AgentWorkflowBase
+             The workflow assigned to the agent.
+         objects : dict
+             The exploded objects assigned from the agent.
+         tools : List[FoundationaLLMToolBase]
+             The tools assigned to the agent.
+         operations_manager : OperationsManager
+             The operations manager instance.
+         user_identity : UserIdentity
+             The user identity of the user initiating the request.
+         config : Configuration
+             The application configuration for FoundationaLLM.
+         intercept_http_calls : bool, optional
+             Whether to intercept HTTP calls made by the workflow, by default False.
+         """
+         super().__init__(workflow_config, objects, tools, operations_manager, user_identity, config)
+         self.name = workflow_config.name
+         self.default_error_message = workflow_config.properties.get(
+             'default_error_message',
+             'An error occurred while processing the request.') \
+             if workflow_config.properties else 'An error occurred while processing the request.'
+
+         # Sets self.workflow_llm
+         self.create_workflow_llm(intercept_http_calls=intercept_http_calls)
+         self.instance_id = objects.get(CompletionRequestObjectKeys.INSTANCE_ID, None)
+
+     async def invoke_async(
+         self,
+         operation_id: str,
+         user_prompt: str,
+         user_prompt_rewrite: Optional[str],
+         message_history: List[MessageHistoryItem],
+         file_history: List[FileHistoryItem],
+         conversation_id: Optional[str] = None,
+         objects: dict = None
+     ) -> CompletionResponse:
+         """
+         Invokes the workflow asynchronously.
+
+         Parameters
+         ----------
+         operation_id : str
+             The unique identifier of the FoundationaLLM operation.
+         user_prompt : str
+             The user prompt message.
+         user_prompt_rewrite : Optional[str]
+             The user prompt rewrite message containing additional context to clarify the user's intent.
+         message_history : List[MessageHistoryItem]
+             The message history.
+         file_history : List[FileHistoryItem]
+             The file history.
+         conversation_id : Optional[str]
+             The conversation identifier for the workflow execution.
+         objects : dict
+             The exploded objects assigned from the agent. This is used to pass additional context to the workflow.
+         """
+
+         workflow_start_time = time.time()
+
+         if objects is None:
+             objects = {}
+
+         content_artifacts: List[ContentArtifact] = []
+         input_tokens = 0
+         output_tokens = 0
+
+         llm_prompt = user_prompt_rewrite or user_prompt
+         workflow_main_prompt = self.create_workflow_main_prompt()
+
+         # Get the prompt template.
+         prompt_template = self.__get_prompt_template(
+             workflow_main_prompt,
+             message_history
+         )
+
+         chain_context = { "context": RunnablePassthrough() }
+
+         # Compose the LCEL chain.
+         chain = (
+             chain_context
+             | prompt_template
+             | RunnableLambda(self.__record_full_prompt)
+             | self.workflow_llm
+         )
+
+         retvalue = None
+
+         ai_model = self.get_workflow_main_model_definition()
+         api_endpoint = self.get_ai_model_api_endpoint_configuration(ai_model)
+
+         if api_endpoint.provider in (LanguageModelProvider.MICROSOFT, LanguageModelProvider.OPENAI):
+             # OpenAI-compatible models.
+             with get_openai_callback() as cb:
+                 # Add an output parser so the chain returns a plain string.
+                 chain = chain | StrOutputParser()
+                 try:
+                     with self.tracer.start_as_current_span('langchain_invoke_lcel_chain', kind=SpanKind.SERVER):
+                         completion = await chain.ainvoke(llm_prompt)
+
+                     workflow_end_time = time.time()
+
+                     response_content = OpenAITextMessageContentItem(
+                         value=completion,
+                         agent_capability_category=AgentCapabilityCategories.FOUNDATIONALLM_KNOWLEDGE_MANAGEMENT
+                     )
+
+                     output_tokens = cb.completion_tokens
+                     input_tokens = cb.prompt_tokens
+
+                     workflow_content_artifact = self.create_workflow_execution_content_artifact(
+                         llm_prompt,
+                         input_tokens,
+                         output_tokens,
+                         workflow_end_time - workflow_start_time)
+                     content_artifacts.append(workflow_content_artifact)
+
+                     retvalue = CompletionResponse(
+                         operation_id=operation_id,
+                         content=[response_content],
+                         content_artifacts=content_artifacts,
+                         user_prompt=llm_prompt,
+                         full_prompt=self.full_prompt.text,
+                         completion_tokens=output_tokens,
+                         prompt_tokens=input_tokens,
+                         total_tokens=input_tokens + output_tokens,
+                         total_cost=cb.total_cost
+                     )
+
+                 except Exception as e:
+                     raise LangChainException(f"An unexpected exception occurred when executing the completion request: {str(e)}", 500)
+         else:
+             with self.tracer.start_as_current_span('langchain_invoke_lcel_chain', kind=SpanKind.SERVER):
+                 completion = await chain.ainvoke(llm_prompt)
+
+             workflow_end_time = time.time()
+
+             response_content = OpenAITextMessageContentItem(
+                 value=completion.content,
+                 agent_capability_category=AgentCapabilityCategories.FOUNDATIONALLM_KNOWLEDGE_MANAGEMENT
+             )
+
+             # usage_metadata can be None when the provider does not report token usage.
+             usage_metadata = completion.usage_metadata or {}
+             output_tokens = usage_metadata.get("output_tokens", 0)
+             input_tokens = usage_metadata.get("input_tokens", 0)
+
+             workflow_content_artifact = self.create_workflow_execution_content_artifact(
+                 llm_prompt,
+                 input_tokens,
+                 output_tokens,
+                 workflow_end_time - workflow_start_time)
+             content_artifacts.append(workflow_content_artifact)
+
+             retvalue = CompletionResponse(
+                 operation_id=operation_id,
+                 content=[response_content],
+                 content_artifacts=content_artifacts,
+                 user_prompt=llm_prompt,
+                 full_prompt=self.full_prompt.text,
+                 completion_tokens=output_tokens,
+                 prompt_tokens=input_tokens,
+                 total_tokens=input_tokens + output_tokens,
+                 total_cost=0
+             )
+
+         return retvalue
+
+     def __build_conversation_history(
+         self,
+         messages: List[MessageHistoryItem] = None
+     ) -> str:
+         """
+         Builds a chat history string from a list of MessageHistoryItem objects to
+         be added to the prompt for the completion request.
+
+         Parameters
+         ----------
+         messages : List[MessageHistoryItem]
+             The list of messages from which to build the chat history.
+         """
+         if messages is None or len(messages) == 0:
+             return ""
+         chat_history = "Chat History:\n"
+         for msg in messages:
+             chat_history += msg.sender + ": " + msg.text + "\n"
+         chat_history += "\n\n"
+         return chat_history
+
+     def __get_prompt_template(
+         self,
+         prompt: str,
+         message_history: List[MessageHistoryItem]
+     ) -> PromptTemplate:
+         """
+         Builds a prompt template.
+         """
+
+         prompt_builder = f'{prompt}\n\n'
+
+         # Add the message history, if it exists.
+         prompt_builder += self.__build_conversation_history(message_history)
+
+         # Insert the context into the template.
+         prompt_builder += '{context}'
+
+         # Create the prompt template.
+         return PromptTemplate.from_template(prompt_builder)
+
+     def __record_full_prompt(self, prompt: PromptValue) -> PromptValue:
+         """
+         Records the full prompt for the completion request.
+
+         Parameters
+         ----------
+         prompt : PromptValue
+             The rendered prompt, populated with context.
+
+         Returns
+         -------
+         PromptValue
+             Returns the full prompt unchanged.
+         """
+         self.full_prompt = prompt
+         return prompt
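The chain shape used above (prompt template, a pass-through tap that records the rendered prompt, the model, then an output parser) can be reproduced with `langchain_core` alone. A minimal sketch with a `RunnableLambda` standing in for the real chat model so it runs without credentials:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

recorded = {}

def record_full_prompt(prompt_value):
    # Tap the rendered prompt as it flows through the chain (the workflow
    # stores it on self.full_prompt; here a dict stands in).
    recorded["full_prompt"] = prompt_value.text
    return prompt_value

# Stand-in "model" so the example runs without an API key.
fake_llm = RunnableLambda(lambda prompt_value: "echo: " + prompt_value.text)

prompt_template = PromptTemplate.from_template("System instructions...\n\n{context}")

chain = (
    {"context": RunnablePassthrough()}
    | prompt_template
    | RunnableLambda(record_full_prompt)
    | fake_llm
    | StrOutputParser()
)

print(chain.invoke("What is FoundationaLLM?"))
print(recorded["full_prompt"])
```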
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain/workflows/foundationallm_langgraph_react_agent_workflow.py
@@ -0,0 +1,225 @@
+ """
+ Class: FoundationaLLMLangGraphReActAgentWorkflow
+ Description: FoundationaLLM agent workflow based on the LangGraph ReAct Agent.
+ """
+
+ import time
+ from typing import Dict, List, Optional
+ from opentelemetry.trace import SpanKind
+
+ from langchain_core.messages import (
+     AIMessage,
+     BaseMessage,
+     HumanMessage,
+     ToolMessage
+ )
+ from langgraph.prebuilt import create_react_agent
+
+ from foundationallm.langchain.common import (
+     FoundationaLLMWorkflowBase,
+     FoundationaLLMToolBase
+ )
+ from foundationallm.config import (
+     Configuration,
+     UserIdentity
+ )
+ from foundationallm.models.agents import (
+     GenericAgentWorkflow,
+     AgentWorkflowBase
+ )
+ from foundationallm.models.constants import (
+     AgentCapabilityCategories
+ )
+ from foundationallm.models.messages import MessageHistoryItem
+ from foundationallm.models.orchestration import (
+     CompletionRequestObjectKeys,
+     CompletionResponse,
+     ContentArtifact,
+     FileHistoryItem,
+     OpenAITextMessageContentItem
+ )
+ from foundationallm.operations import OperationsManager
+
+
+ class FoundationaLLMLangGraphReActAgentWorkflow(FoundationaLLMWorkflowBase):
+     """
+     FoundationaLLM workflow based on the LangGraph ReAct Agent.
+     """
+
+     def __init__(
+         self,
+         workflow_config: GenericAgentWorkflow | AgentWorkflowBase,
+         objects: Dict,
+         tools: List[FoundationaLLMToolBase],
+         operations_manager: OperationsManager,
+         user_identity: UserIdentity,
+         config: Configuration,
+         intercept_http_calls: bool = False
+     ):
+         """
+         Initializes the FoundationaLLMLangGraphReActAgentWorkflow class with the workflow configuration.
+
+         Parameters
+         ----------
+         workflow_config : GenericAgentWorkflow | AgentWorkflowBase
+             The workflow assigned to the agent.
+         objects : dict
+             The exploded objects assigned from the agent.
+         tools : List[FoundationaLLMToolBase]
+             The tools assigned to the agent.
+         operations_manager : OperationsManager
+             The operations manager instance.
+         user_identity : UserIdentity
+             The user identity of the user initiating the request.
+         config : Configuration
+             The application configuration for FoundationaLLM.
+         intercept_http_calls : bool, optional
+             Whether to intercept HTTP calls made by the workflow, by default False.
+         """
+         super().__init__(workflow_config, objects, tools, operations_manager, user_identity, config)
+         self.name = workflow_config.name
+         self.default_error_message = workflow_config.properties.get(
+             'default_error_message',
+             'An error occurred while processing the request.') \
+             if workflow_config.properties else 'An error occurred while processing the request.'
+
+         # Sets self.workflow_llm
+         self.create_workflow_llm(intercept_http_calls=intercept_http_calls)
+         self.instance_id = objects.get(CompletionRequestObjectKeys.INSTANCE_ID, None)
+
+     async def invoke_async(
+         self,
+         operation_id: str,
+         user_prompt: str,
+         user_prompt_rewrite: Optional[str],
+         message_history: List[MessageHistoryItem],
+         file_history: List[FileHistoryItem],
+         conversation_id: Optional[str] = None,
+         objects: dict = None
+     ) -> CompletionResponse:
+         """
+         Invokes the workflow asynchronously.
+
+         Parameters
+         ----------
+         operation_id : str
+             The unique identifier of the FoundationaLLM operation.
+         user_prompt : str
+             The user prompt message.
+         user_prompt_rewrite : Optional[str]
+             The user prompt rewrite message containing additional context to clarify the user's intent.
+         message_history : List[MessageHistoryItem]
+             The message history.
+         file_history : List[FileHistoryItem]
+             The file history.
+         conversation_id : Optional[str]
+             The conversation identifier for the workflow execution.
+         objects : dict
+             The exploded objects assigned from the agent. This is used to pass additional context to the workflow.
+         """
+
+         workflow_start_time = time.time()
+
+         if objects is None:
+             objects = {}
+
+         content_artifacts: List[ContentArtifact] = []
+         input_tokens = 0
+         output_tokens = 0
+
+         llm_prompt = user_prompt_rewrite or user_prompt
+         workflow_main_prompt = self.create_workflow_main_prompt()
+
+         message_list = self.__get_message_list(
+             llm_prompt,
+             message_history,
+             objects
+         )
+
+         graph = create_react_agent(
+             model=self.workflow_llm,
+             tools=self.tools,
+             prompt=workflow_main_prompt
+         )
+
+         # LangGraph reads recursion_limit from the top level of the run config;
+         # application values go under "configurable".
+         run_config = {"configurable": {"original_user_prompt": llm_prompt}}
+         if self.workflow_config.graph_recursion_limit is not None:
+             run_config["recursion_limit"] = self.workflow_config.graph_recursion_limit
+
+         response = await graph.ainvoke(
+             { "messages": message_list },
+             config=run_config
+         )
+
+         # TODO: Process tool messages carrying analysis results: an AIMessage with
+         # content='' but with additional_kwargs={'tool_calls': [...]}.
+
+         # Get ContentArtifact items from ToolMessages.
+         tool_messages = [message for message in response["messages"] if isinstance(message, ToolMessage)]
+         for tool_message in tool_messages:
+             if tool_message.artifact is not None:
+                 # If the tool message artifact is a list, collect any ContentArtifact items it contains.
+                 if isinstance(tool_message.artifact, list):
+                     for item in tool_message.artifact:
+                         if isinstance(item, ContentArtifact):
+                             content_artifacts.append(item)
+
+         final_message = response["messages"][-1]
+         response_content = OpenAITextMessageContentItem(
+             value=final_message.content,
+             agent_capability_category=AgentCapabilityCategories.FOUNDATIONALLM_KNOWLEDGE_MANAGEMENT
+         )
+
+         workflow_end_time = time.time()
+         # usage_metadata can be None when the provider does not report token usage.
+         usage_metadata = final_message.usage_metadata or {}
+         output_tokens = usage_metadata.get("output_tokens", 0) or 0
+         input_tokens = usage_metadata.get("input_tokens", 0) or 0
+
+         workflow_content_artifact = self.create_workflow_execution_content_artifact(
+             llm_prompt,
+             input_tokens,
+             output_tokens,
+             workflow_end_time - workflow_start_time)
+         content_artifacts.append(workflow_content_artifact)
+
+         retvalue = CompletionResponse(
+             operation_id=operation_id,
+             content=[response_content],
+             content_artifacts=content_artifacts,
+             user_prompt=llm_prompt,
+             full_prompt=workflow_main_prompt,
+             completion_tokens=output_tokens,
+             prompt_tokens=input_tokens,
+             total_tokens=output_tokens + input_tokens,
+             total_cost=0
+         )
+         return retvalue
+
+     def __get_message_list(
+         self,
+         llm_prompt: str,
+         message_history: List[MessageHistoryItem],
+         objects: dict
+     ) -> List[BaseMessage]:
+         """
+         Returns the message history in the format required by the workflow.
+
+         Parameters
+         ----------
+         llm_prompt : str
+             The LLM prompt to be processed.
+         message_history : List[MessageHistoryItem]
+             The message history to be processed.
+         objects : dict
+             The exploded objects assigned from the agent. This is used to pass additional context to the workflow.
+         """
+
+         if objects is None:
+             objects = {}
+
+         # Convert message history to LangChain message types.
+         messages = []
+         for message in message_history:
+             # Convert each MessageHistoryItem to the appropriate LangChain message type.
+             if message.sender == "User":
+                 messages.append(HumanMessage(content=message.text))
+             else:
+                 messages.append(AIMessage(content=message.text))
+
+         return [
+             *messages,
+             HumanMessage(content=llm_prompt)
+         ]
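A detail worth noting in this workflow is where the recursion limit lives: LangGraph reads `recursion_limit` from the top level of the run config passed to `ainvoke`, while application values such as `original_user_prompt` belong under `"configurable"`. A small helper sketch that builds the config in that shape:

```python
from typing import Any, Dict, Optional

def build_run_config(original_user_prompt: str, graph_recursion_limit: Optional[int]) -> Dict[str, Any]:
    """Build a LangGraph run config: application values go under
    "configurable", recursion_limit at the top level where LangGraph
    looks for it."""
    config: Dict[str, Any] = {"configurable": {"original_user_prompt": original_user_prompt}}
    if graph_recursion_limit is not None:
        config["recursion_limit"] = graph_recursion_limit
    return config

print(build_run_config("What can you do?", 10))
# {'configurable': {'original_user_prompt': 'What can you do?'}, 'recursion_limit': 10}
```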
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/PKG-INFO
@@ -0,0 +1,26 @@
+ Metadata-Version: 2.4
+ Name: foundationallm-agent-plugins-langchain
+ Version: 0.9.7.post3
+ Summary: FoundationaLLM Agent plugins.
+ Author-email: FoundationaLLM <dev@foundationallm.ai>
+ License-Expression: MIT
+ Project-URL: Homepage, https://foundationallm.ai
+ Project-URL: Issues, https://github.com/foundationallm/foundationallm/issues
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+
+ # FoundationaLLM Agent Plugins LangChain
+
+ This package provides the FoundationaLLM agent plugins built on LangChain.
+
+ To build and publish the package, run the following commands (in the folder where `pyproject.toml` is located):
+
+ ```cmd
+ python -m pip install --upgrade build
+ python -m pip install --upgrade twine
+
+ python -m build
+ python -m twine upload dist/*
+ ```
foundationallm_agent_plugins_langchain-0.9.7.post3/foundationallm_agent_plugins_langchain.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
+ README.md
+ pyproject.toml
+ foundationallm_agent_plugins_langchain/__init__.py
+ foundationallm_agent_plugins_langchain/workflow_plugin_manager.py
+ foundationallm_agent_plugins_langchain.egg-info/PKG-INFO
+ foundationallm_agent_plugins_langchain.egg-info/SOURCES.txt
+ foundationallm_agent_plugins_langchain.egg-info/dependency_links.txt
+ foundationallm_agent_plugins_langchain.egg-info/top_level.txt
+ foundationallm_agent_plugins_langchain/_metadata/foundationallm_manifest.json
+ foundationallm_agent_plugins_langchain/workflows/__init__.py
+ foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_agent_workflow.py
+ foundationallm_agent_plugins_langchain/workflows/foundationallm_langchain_lcel_workflow.py
+ foundationallm_agent_plugins_langchain/workflows/foundationallm_langgraph_react_agent_workflow.py
foundationallm_agent_plugins_langchain-0.9.7.post3/pyproject.toml
@@ -0,0 +1,24 @@
+ [project]
+ name = "foundationallm-agent-plugins-langchain"
+ version = "0.9.7.post3"
+ authors = [
+     { name="FoundationaLLM", email="dev@foundationallm.ai" },
+ ]
+ description = "FoundationaLLM Agent plugins."
+ readme = "README.md"
+ requires-python = ">=3.11"
+ license = "MIT"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+ ]
+
+ [project.urls]
+ Homepage = "https://foundationallm.ai"
+ Issues = "https://github.com/foundationallm/foundationallm/issues"
+
+ [tool.setuptools.packages.find]
+ include = ['*']
+
+ [tool.setuptools.package-data]
+ foundationallm_agent_plugins_langchain = ["_metadata/foundationallm_manifest.json"]
foundationallm_agent_plugins_langchain-0.9.7.post3/setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+