alita-sdk 0.3.391__py3-none-any.whl → 0.3.393__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/langchain/assistant.py +13 -0
- alita_sdk/runtime/langchain/langraph_agent.py +18 -10
- alita_sdk/runtime/tools/llm.py +134 -114
- alita_sdk/tools/base_indexer_toolkit.py +25 -0
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +10 -2
- {alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/RECORD +11 -11
- {alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/assistant.py CHANGED

@@ -263,6 +263,9 @@ class Assistant:
         schema_dict = {
             'name': 'react_agent',
             'state': {
+                'input': {
+                    'type': 'str'
+                },
                 'messages': state_messages_config
             },
             'nodes': [{
@@ -271,6 +274,16 @@ class Assistant:
             'prompt': {
                 'template': escaped_prompt
             },
+            'input_mapping': {
+                'system': {
+                    'type': 'fixed',
+                    'value': escaped_prompt
+                },
+                'task': {
+                    'type': 'variable',
+                    'value': 'input'
+                }
+            },
             'input': ['messages'],
             'output': ['messages'],
             'transition': 'END'
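The new 'input_mapping' block distinguishes 'fixed' entries (literal values, here the escaped system prompt) from 'variable' entries (looked up in graph state, here the new 'input' state field). A minimal sketch of how such a mapping could be resolved; resolve_input_mapping is a hypothetical helper for illustration, not the SDK's implementation:

def resolve_input_mapping(input_mapping: dict, state: dict) -> dict:
    # Resolve each mapping entry against the current graph state.
    resolved = {}
    for target, spec in input_mapping.items():
        if spec['type'] == 'fixed':
            resolved[target] = spec['value']              # literal value
        elif spec['type'] == 'variable':
            resolved[target] = state.get(spec['value'])   # state lookup
    return resolved

state = {'input': 'Summarize the latest release', 'messages': []}
mapping = {
    'system': {'type': 'fixed', 'value': 'You are a react agent'},
    'task': {'type': 'variable', 'value': 'input'},
}
print(resolve_input_mapping(mapping, state))
# {'system': 'You are a react agent', 'task': 'Summarize the latest release'}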
alita_sdk/runtime/langchain/langraph_agent.py CHANGED

@@ -348,8 +348,8 @@ class StateModifierNode(Runnable):
         return result
 
 
-
-
+def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
+                          state_class=None, output_variables=None):
     # prepare output channels
     if interrupt_after is None:
         interrupt_after = []
@@ -466,7 +466,7 @@ def create_graph(
         elif node_type == 'agent':
             input_params = node.get('input', ['messages'])
             input_mapping = node.get('input_mapping',
-
+                                     {'messages': {'type': 'variable', 'value': 'messages'}})
             lg_builder.add_node(node_id, FunctionTool(
                 client=client, tool=tool,
                 name=node_id, return_type='str',
@@ -481,7 +481,8 @@ def create_graph(
             # wrap with mappings
             pipeline_name = node.get('tool', None)
             if not pipeline_name:
-                raise ValueError(
+                raise ValueError(
+                    "Subgraph must have a 'tool' node: add required tool to the subgraph node")
             node_fn = SubgraphRunnable(
                 inner=tool.graph,
                 name=pipeline_name,
@@ -520,7 +521,8 @@ def create_graph(
             loop_toolkit_name = node.get('loop_toolkit_name')
             loop_tool_name = node.get('loop_tool')
             if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
-                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
+                    loop_tool_name)
                 for t in tools:
                     if t.name == loop_tool_name:
                         logger.debug(f"Loop tool discovered: {t}")
@@ -555,7 +557,8 @@ def create_graph(
                         break
         elif node_type == 'code':
             from ..tools.sandbox import create_sandbox_tool
-            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+            sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
+                                               alita_client=kwargs.get('alita_client', None))
            code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
             lg_builder.add_node(node_id, FunctionTool(
                 tool=sandbox_tool, name=node['id'], return_type='dict',
@@ -777,7 +780,13 @@ class LangGraphAgentRunnable(CompiledStateGraph):
             # Convert chat history dict messages to LangChain message objects
             chat_history = input.pop('chat_history')
             input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
-
+
+        # handler for LLM node: if no input (Chat perspective), then take last human message
+        if not input.get('input'):
+            if input.get('messages'):
+                input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
+                                       None)]
+
         # Append current input to existing messages instead of overwriting
         if input.get('input'):
             if isinstance(input['input'], str):
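A small, self-contained illustration of that fallback: when no explicit 'input' is provided, the most recent HumanMessage in the history is selected (toy messages, not SDK state):

from langchain_core.messages import AIMessage, HumanMessage

messages = [HumanMessage("hi"), AIMessage("hello"), HumanMessage("what changed?")]
# Walk the history backwards and keep the first human message found
last_human = next((m for m in reversed(messages) if isinstance(m, HumanMessage)), None)
print(last_human.content)  # -> 'what changed?'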
@@ -801,7 +810,8 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         else:
             result = super().invoke(input, config=config, *args, **kwargs)
         try:
-            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+            output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
+                          result['messages'][-1].content)
         except:
             output = list(result.values())[-1]
         config_state = self.get_state(config)
@@ -809,8 +819,6 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if is_execution_finished:
             thread_id = None
 
-
-
         result_with_state = {
             "output": output,
             "thread_id": thread_id,
alita_sdk/runtime/tools/llm.py CHANGED
@@ -123,9 +123,16 @@ class LLMNode(BaseTool):
                 for key, value in (self.structured_output_dict or {}).items()
             }
             struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-
-
-
+            completion = llm_client.invoke(messages, config=config)
+            if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                new_messages, _ = self.__perform_tool_calling(completion, messages, llm_client, config)
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(new_messages, config=config)
+                result = completion.model_dump()
+            else:
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(messages, config=config)
+                result = completion.model_dump()
 
             # Ensure messages are properly formatted
             if result.get('messages') and isinstance(result['messages'], list):
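The structured-output path relies on LangChain's with_structured_output (wrapped by __get_struct_output_model below), which binds the chat model to a Pydantic schema so the completion comes back as a model instance. A minimal sketch, assuming any chat model that supports the feature; the LLMOutput fields here stand in for whatever structured_output_dict defines:

from pydantic import BaseModel
from langchain_openai import ChatOpenAI  # any provider with structured-output support

class LLMOutput(BaseModel):  # stand-in for create_pydantic_model(...)
    answer: str
    confidence: float

llm_client = ChatOpenAI(model="gpt-4o-mini")
llm = llm_client.with_structured_output(LLMOutput)
completion = llm.invoke("How confident are you that 2 + 2 = 4?")
result = completion.model_dump()  # e.g. {'answer': '...', 'confidence': 1.0}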
@@ -139,117 +146,13 @@ class LLMNode(BaseTool):
         # Handle both tool-calling and regular responses
         if hasattr(completion, 'tool_calls') and completion.tool_calls:
             # Handle iterative tool-calling and execution
-            new_messages = messages
-            max_iterations = 15
-            iteration = 0
-
-            # Continue executing tools until no more tool calls or max iterations reached
-            current_completion = completion
-            while (hasattr(current_completion, 'tool_calls') and
-                   current_completion.tool_calls and
-                   iteration < max_iterations):
-
-                iteration += 1
-                logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
-
-                # Execute each tool call in the current completion
-                tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
-                                                                      '__iter__') else []
-
-                for tool_call in tool_calls:
-                    tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                      'name',
-                                                                                                      '')
-                    tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                      'args',
-                                                                                                      {})
-                    tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
-                        tool_call, 'id', '')
-
-                    # Find the tool in filtered tools
-                    filtered_tools = self.get_filtered_tools()
-                    tool_to_execute = None
-                    for tool in filtered_tools:
-                        if tool.name == tool_name:
-                            tool_to_execute = tool
-                            break
-
-                    if tool_to_execute:
-                        try:
-                            logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
-                            # Pass the underlying config to the tool execution invoke method
-                            # since it may be another agent, graph, etc. to see it properly in thinking steps
-                            tool_result = tool_to_execute.invoke(tool_args, config=config)
-
-                            # Create tool message with result - preserve structured content
-                            from langchain_core.messages import ToolMessage
-
-                            # Check if tool_result is structured content (list of dicts)
-                            # TODO: need solid check for being compatible with ToolMessage content format
-                            if isinstance(tool_result, list) and all(
-                                isinstance(item, dict) and 'type' in item for item in tool_result
-                            ):
-                                # Use structured content directly for multimodal support
-                                tool_message = ToolMessage(
-                                    content=tool_result,
-                                    tool_call_id=tool_call_id
-                                )
-                            else:
-                                # Fallback to string conversion for other tool results
-                                tool_message = ToolMessage(
-                                    content=str(tool_result),
-                                    tool_call_id=tool_call_id
-                                )
-                            new_messages.append(tool_message)
-
-                        except Exception as e:
-                            logger.error(f"Error executing tool '{tool_name}': {e}")
-                            # Create error tool message
-                            from langchain_core.messages import ToolMessage
-                            tool_message = ToolMessage(
-                                content=f"Error executing {tool_name}: {str(e)}",
-                                tool_call_id=tool_call_id
-                            )
-                            new_messages.append(tool_message)
-                    else:
-                        logger.warning(f"Tool '{tool_name}' not found in available tools")
-                        # Create error tool message for missing tool
-                        from langchain_core.messages import ToolMessage
-                        tool_message = ToolMessage(
-                            content=f"Tool '{tool_name}' not available",
-                            tool_call_id=tool_call_id
-                        )
-                        new_messages.append(tool_message)
-
-                # Call LLM again with tool results to get next response
-                try:
-                    current_completion = llm_client.invoke(new_messages, config=config)
-                    new_messages.append(current_completion)
-
-                    # Check if we still have tool calls
-                    if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
-                        logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
-                    else:
-                        logger.info("LLM completed without requesting more tools")
-                        break
-
-                except Exception as e:
-                    logger.error(f"Error in LLM call during iteration {iteration}: {e}")
-                    # Add error message and break the loop
-                    error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
-                    new_messages.append(AIMessage(content=error_msg))
-                    break
-
-            # Log completion status
-            if iteration >= max_iterations:
-                logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
-                # Add a warning message to the chat
-                warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
-                new_messages.append(AIMessage(content=warning_msg))
-            else:
-                logger.info(f"Tool execution completed after {iteration} iterations")
+            new_messages, current_completion = self.__perform_tool_calling(completion, messages, llm_client, config)
 
-
+            output_msgs = {"messages": new_messages}
+            if self.output_variables:
+                output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
+
+            return output_msgs
         else:
             # Regular text response
             content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
@@ -275,4 +178,121 @@ class LLMNode(BaseTool):
 
     def _run(self, *args, **kwargs):
         # Legacy support for old interface
-        return self.invoke(kwargs, **kwargs)
+        return self.invoke(kwargs, **kwargs)
+
+    def __perform_tool_calling(self, completion, messages, llm_client, config):
+        # Handle iterative tool-calling and execution
+        new_messages = messages + [completion]
+        max_iterations = 15
+        iteration = 0
+
+        # Continue executing tools until no more tool calls or max iterations reached
+        current_completion = completion
+        while (hasattr(current_completion, 'tool_calls') and
+               current_completion.tool_calls and
+               iteration < max_iterations):
+
+            iteration += 1
+            logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
+
+            # Execute each tool call in the current completion
+            tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
+                                                                  '__iter__') else []
+
+            for tool_call in tool_calls:
+                tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
+                                                                                                  'name',
+                                                                                                  '')
+                tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
+                                                                                                  'args',
+                                                                                                  {})
+                tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
+                    tool_call, 'id', '')
+
+                # Find the tool in filtered tools
+                filtered_tools = self.get_filtered_tools()
+                tool_to_execute = None
+                for tool in filtered_tools:
+                    if tool.name == tool_name:
+                        tool_to_execute = tool
+                        break
+
+                if tool_to_execute:
+                    try:
+                        logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
+                        # Pass the underlying config to the tool execution invoke method
+                        # since it may be another agent, graph, etc. to see it properly in thinking steps
+                        tool_result = tool_to_execute.invoke(tool_args, config=config)
+
+                        # Create tool message with result - preserve structured content
+                        from langchain_core.messages import ToolMessage
+
+                        # Check if tool_result is structured content (list of dicts)
+                        # TODO: need solid check for being compatible with ToolMessage content format
+                        if isinstance(tool_result, list) and all(
+                            isinstance(item, dict) and 'type' in item for item in tool_result
+                        ):
+                            # Use structured content directly for multimodal support
+                            tool_message = ToolMessage(
+                                content=tool_result,
+                                tool_call_id=tool_call_id
+                            )
+                        else:
+                            # Fallback to string conversion for other tool results
+                            tool_message = ToolMessage(
+                                content=str(tool_result),
+                                tool_call_id=tool_call_id
+                            )
+                        new_messages.append(tool_message)
+
+                    except Exception as e:
+                        logger.error(f"Error executing tool '{tool_name}': {e}")
+                        # Create error tool message
+                        from langchain_core.messages import ToolMessage
+                        tool_message = ToolMessage(
+                            content=f"Error executing {tool_name}: {str(e)}",
+                            tool_call_id=tool_call_id
+                        )
+                        new_messages.append(tool_message)
+                else:
+                    logger.warning(f"Tool '{tool_name}' not found in available tools")
+                    # Create error tool message for missing tool
+                    from langchain_core.messages import ToolMessage
+                    tool_message = ToolMessage(
+                        content=f"Tool '{tool_name}' not available",
+                        tool_call_id=tool_call_id
+                    )
+                    new_messages.append(tool_message)
+
+            # Call LLM again with tool results to get next response
+            try:
+                current_completion = llm_client.invoke(new_messages, config=config)
+                new_messages.append(current_completion)
+
+                # Check if we still have tool calls
+                if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                    logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
+                else:
+                    logger.info("LLM completed without requesting more tools")
+                    break
+
+            except Exception as e:
+                logger.error(f"Error in LLM call during iteration {iteration}: {e}")
+                # Add error message and break the loop
+                error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
+                new_messages.append(AIMessage(content=error_msg))
+                break
+
+        # Log completion status
+        if iteration >= max_iterations:
+            logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
+            # Add a warning message to the chat
+            warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
+            new_messages.append(AIMessage(content=warning_msg))
+        else:
+            logger.info(f"Tool execution completed after {iteration} iterations")
+
+        return new_messages, current_completion
+
+    def __get_struct_output_model(self, llm_client, pydantic_model):
+        return llm_client.with_structured_output(pydantic_model)
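The extracted __perform_tool_calling keeps the same contract as the inline loop it replaces: every tool call in a completion is answered by a ToolMessage carrying the matching tool_call_id, the assistant message and tool replies are appended to the history, and the loop ends when a completion arrives without tool_calls. One round of that exchange in isolation, with toy data rather than SDK objects:

from langchain_core.messages import AIMessage, ToolMessage

# The model asked for one tool call...
ai = AIMessage(content="", tool_calls=[{"name": "add", "args": {"a": 2, "b": 3}, "id": "call_1"}])
# ...we execute it and answer with a ToolMessage echoing the same id,
tool_reply = ToolMessage(content=str(2 + 3), tool_call_id="call_1")
history = [ai, tool_reply]
# ...then the history goes back to the model; the loop stops once a
# completion carries no tool_calls (or max_iterations is hit).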
alita_sdk/tools/base_indexer_toolkit.py CHANGED

@@ -160,6 +160,8 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         if clean_index:
             self._clean_index(index_name)
         #
+        self.index_meta_init(index_name, kwargs)
+        #
         self._log_tool_event(f"Indexing data into collection with suffix '{index_name}'. It can take some time...")
         self._log_tool_event(f"Loading the documents to index...{kwargs}")
         documents = self._base_loader(**kwargs)
@@ -454,6 +456,29 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             reranking_config=reranking_config,
             extended_search=extended_search
         )
+
+    def index_meta_init(self, index_name: str, index_configuration: dict[str, Any]):
+        index_meta = super().get_index_meta(index_name)
+        if not index_meta:
+            self._log_tool_event(
+                f"There is no existing index_meta for collection '{index_name}'. Initializing it.",
+                tool_name="index_data"
+            )
+            from ..runtime.langchain.interfaces.llm_processor import add_documents
+            created_on = time.time()
+            metadata = {
+                "collection": index_name,
+                "type": IndexerKeywords.INDEX_META_TYPE.value,
+                "indexed": 0,
+                "state": IndexerKeywords.INDEX_META_IN_PROGRESS.value,
+                "index_configuration": index_configuration,
+                "created_on": created_on,
+                "updated_on": created_on,
+                "history": "[]",
+                "task_id": None,
+            }
+            index_meta_doc = Document(page_content=f"{IndexerKeywords.INDEX_META_TYPE.value}_{index_name}", metadata=metadata)
+            add_documents(vectorstore=self.vectorstore, documents=[index_meta_doc])
 
     def index_meta_update(self, index_name: str, state: str, result: int):
         index_meta_raw = super().get_index_meta(index_name)
alita_sdk/tools/confluence/loader.py CHANGED

@@ -3,6 +3,7 @@ from typing import Optional, List
 from logging import getLogger
 
 import requests
+from langchain_core.documents import Document
 
 logger = getLogger(__name__)
 from PIL import Image
@@ -193,6 +194,15 @@ class AlitaConfluenceLoader(ConfluenceLoader):
         else:
             return super().process_image(link, ocr_languages)
 
+    def process_page(self, page: dict, include_attachments: bool, include_comments: bool, include_labels: bool,
+                     content_format: ContentFormat, ocr_languages: Optional[str] = None,
+                     keep_markdown_format: Optional[bool] = False, keep_newlines: bool = False) -> Document:
+        if not page.get("title"):
+            # if 'include_restricted_content' set to True, draft pages are loaded and can have no title
+            page["title"] = "Untitled"
+        return super().process_page(page, include_attachments, include_comments, include_labels, content_format,
+                                    ocr_languages, keep_markdown_format, keep_newlines)
+
     # TODO review usage
     # def process_svg(
     #     self,
alita_sdk/tools/vector_adapters/VectorStoreAdapter.py CHANGED

@@ -136,7 +136,15 @@ class PGVectorAdapter(VectorStoreAdapter):
         """Clean the vectorstore collection by deleting all indexed data."""
         # This logic deletes all data from the vectorstore collection without removal of collection.
         # Collection itself remains available for future indexing.
-
+        from sqlalchemy.orm import Session
+        from sqlalchemy import func
+
+        store = vectorstore_wrapper.vectorstore
+        with Session(store.session_maker.bind) as session:
+            session.query(store.EmbeddingStore).filter(
+                func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
+            ).delete(synchronize_session=False)
+            session.commit()
 
     def is_vectorstore_type(self, vectorstore) -> bool:
         """Check if the vectorstore is a PGVector store."""
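For reference, a sketch of the statement this ORM delete compiles to; the table layout below is a hypothetical stand-in for PGVector's EmbeddingStore, and the snippet only prints the generated SQL (no database connection needed):

from sqlalchemy import Column, MetaData, String, Table, delete, func
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import JSONB

# Hypothetical stand-in for PGVector's embedding table
embedding = Table(
    "langchain_pg_embedding", MetaData(),
    Column("id", String, primary_key=True),
    Column("cmetadata", JSONB),
)

stmt = delete(embedding).where(
    func.jsonb_extract_path_text(embedding.c.cmetadata, "collection") == "my_index"
)
print(stmt.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True}))
# DELETE FROM langchain_pg_embedding
# WHERE jsonb_extract_path_text(langchain_pg_embedding.cmetadata, 'collection') = 'my_index'

Only rows whose metadata 'collection' key matches the cleaned index are removed, so other collections sharing the table stay intact; synchronize_session=False simply skips reconciling in-memory ORM objects, which is safe here because none are loaded.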
@@ -199,7 +207,7 @@ class PGVectorAdapter(VectorStoreAdapter):
 
         result = {}
         try:
-            vectorstore_wrapper.
+            vectorstore_wrapper._log_tool_event(message="Retrieving already indexed code data from PGVector vectorstore",
                                                 tool_name="index_code_data")
             store = vectorstore_wrapper.vectorstore
             with (Session(store.session_maker.bind) as session):
{alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.391
+Version: 0.3.393
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
{alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/RECORD CHANGED

@@ -41,11 +41,11 @@ alita_sdk/runtime/clients/datasource.py,sha256=HAZovoQN9jBg0_-lIlGBQzb4FJdczPhkH
 alita_sdk/runtime/clients/prompt.py,sha256=li1RG9eBwgNK_Qf0qUaZ8QNTmsncFrAL2pv3kbxZRZg,1447
 alita_sdk/runtime/clients/sandbox_client.py,sha256=OhEasE0MxBBDw4o76xkxVCpNpr3xJ8spQsrsVxMrjUA,16192
 alita_sdk/runtime/langchain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-alita_sdk/runtime/langchain/assistant.py,sha256=
+alita_sdk/runtime/langchain/assistant.py,sha256=ssgiRln0ZpPSjStqitTKj-EaSlsh5F6Ag6yMETESQUw,15456
 alita_sdk/runtime/langchain/chat_message_template.py,sha256=kPz8W2BG6IMyITFDA5oeb5BxVRkHEVZhuiGl4MBZKdc,2176
 alita_sdk/runtime/langchain/constants.py,sha256=eHVJ_beJNTf1WJo4yq7KMK64fxsRvs3lKc34QCXSbpk,3319
 alita_sdk/runtime/langchain/indexer.py,sha256=0ENHy5EOhThnAiYFc7QAsaTNp9rr8hDV_hTK8ahbatk,37592
-alita_sdk/runtime/langchain/langraph_agent.py,sha256=
+alita_sdk/runtime/langchain/langraph_agent.py,sha256=gYL1m1WzN9dT-2m2gpA3b2IxGXOk1ytEbDVxuqyCrzU,49204
 alita_sdk/runtime/langchain/mixedAgentParser.py,sha256=M256lvtsL3YtYflBCEp-rWKrKtcY1dJIyRGVv7KW9ME,2611
 alita_sdk/runtime/langchain/mixedAgentRenderes.py,sha256=asBtKqm88QhZRILditjYICwFVKF5KfO38hu2O-WrSWE,5964
 alita_sdk/runtime/langchain/store_manager.py,sha256=i8Fl11IXJhrBXq1F1ukEVln57B1IBe-tqSUvfUmBV4A,2218

@@ -114,7 +114,7 @@ alita_sdk/runtime/tools/function.py,sha256=jk_JrtuYByR9Df5EFOGFheB9HktNPJcOwf4js
 alita_sdk/runtime/tools/graph.py,sha256=7jImBBSEdP5Mjnn2keOiyUwdGDFhEXLUrgUiugO3mgA,3503
 alita_sdk/runtime/tools/image_generation.py,sha256=Kls9D_ke_SK7xmVr7I9SlQcAEBJc86gf66haN0qIj9k,7469
 alita_sdk/runtime/tools/indexer_tool.py,sha256=whSLPevB4WD6dhh2JDXEivDmTvbjiMV1MrPl9cz5eLA,4375
-alita_sdk/runtime/tools/llm.py,sha256=
+alita_sdk/runtime/tools/llm.py,sha256=0Ki8YrwDYjAnMbo5eh_3t15ZTWqd4AyE328muFzOsmU,15365
 alita_sdk/runtime/tools/loop.py,sha256=uds0WhZvwMxDVFI6MZHrcmMle637cQfBNg682iLxoJA,8335
 alita_sdk/runtime/tools/loop_output.py,sha256=U4hO9PCQgWlXwOq6jdmCGbegtAxGAPXObSxZQ3z38uk,8069
 alita_sdk/runtime/tools/mcp_server_tool.py,sha256=MhLxZJ44LYrB_0GrojmkyqKoDRaqIHkEQAsg718ipog,4277

@@ -136,7 +136,7 @@ alita_sdk/runtime/utils/toolkit_runtime.py,sha256=MU63Fpxj0b5_r1IUUc0Q3-PN9VwL7r
 alita_sdk/runtime/utils/toolkit_utils.py,sha256=I9QFqnaqfVgN26LUr6s3XlBlG6y0CoHURnCzG7XcwVs,5311
 alita_sdk/runtime/utils/utils.py,sha256=PJK8A-JVIzY1IowOjGG8DIqsIiEFe65qDKvFcjJCKWA,1041
 alita_sdk/tools/__init__.py,sha256=NrZyTEdEhmO1NnAR9RFMQ05Mb-kgu68mAQz3n5r0HYs,10692
-alita_sdk/tools/base_indexer_toolkit.py,sha256=
+alita_sdk/tools/base_indexer_toolkit.py,sha256=JZR69D-ABZZEIE5I5LzthqM9-2xwLvuQPtLC3UYIPeE,26437
 alita_sdk/tools/code_indexer_toolkit.py,sha256=p3zVnCnQTUf7JUGra9Rl6GEK2W1-hvvz0Xsgz0v0muM,7292
 alita_sdk/tools/elitea_base.py,sha256=34fmVdYgd2YXifU5LFNjMQysr4OOIZ6AOZjq4GxLgSw,34417
 alita_sdk/tools/non_code_indexer_toolkit.py,sha256=6Lrqor1VeSLbPLDHAfg_7UAUqKFy1r_n6bdsc4-ak98,1315

@@ -232,7 +232,7 @@ alita_sdk/tools/code/sonar/__init__.py,sha256=iPqj2PnUY4-btJjaDeWIPdn-c9L_uCr_qO
 alita_sdk/tools/code/sonar/api_wrapper.py,sha256=nNqxcWN_6W8c0ckj-Er9HkNuAdgQLoWBXh5UyzNutis,2653
 alita_sdk/tools/confluence/__init__.py,sha256=zRnPBM1c7VTRTS955HNc7AEGV5t8ACc2f9wBXmmeXao,6845
 alita_sdk/tools/confluence/api_wrapper.py,sha256=TrB4g0gYlollej9kG68kQXrrOUiFwVUxYKo-tZ4ngac,90719
-alita_sdk/tools/confluence/loader.py,sha256=
+alita_sdk/tools/confluence/loader.py,sha256=xJhV80zM4x_oalwe36UqiEf4GOgCm_XEezLQNKj4j4k,10025
 alita_sdk/tools/confluence/utils.py,sha256=Lxo6dBD0OlvM4o0JuK6qeB_4LV9BptiwJA9e1vqNcDw,435
 alita_sdk/tools/custom_open_api/__init__.py,sha256=9aT5SPNPWcJC6jMZEM-3rUCXVULj_3-qJLQKmnreKNo,2537
 alita_sdk/tools/custom_open_api/api_wrapper.py,sha256=sDSFpvEqpSvXHGiBISdQQcUecfO3md-_F8hAi6p2dvg,4340

@@ -332,7 +332,7 @@ alita_sdk/tools/testrail/api_wrapper.py,sha256=tQcGlFJmftvs5ZiO4tsP19fCo4CrJeq_U
 alita_sdk/tools/utils/__init__.py,sha256=xB9OQgW65DftadrSpoAAitnEIbIXZKBOCji0NDe7FRM,3923
 alita_sdk/tools/utils/available_tools_decorator.py,sha256=IbrdfeQkswxUFgvvN7-dyLMZMyXLiwvX7kgi3phciCk,273
 alita_sdk/tools/utils/content_parser.py,sha256=7ohj8HeL_-rmc-Fv0TS8IpxIQC8tOpfuhyT3XlWx-gQ,15368
-alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256
+alita_sdk/tools/vector_adapters/VectorStoreAdapter.py,sha256=-9ByRh8bVRraTcJPS7SE-2l3en6A4UkKGS9iAd9fa3w,19722
 alita_sdk/tools/vector_adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 alita_sdk/tools/xray/__init__.py,sha256=eOMWP8VamFbbJgt1xrGpGPqB9ByOTA0Cd3LCaETzGk4,4376
 alita_sdk/tools/xray/api_wrapper.py,sha256=uj5kzUgPdo_Oct9WCNMOpkb6o_3L7J4LZrEGtrwYMmc,30157

@@ -353,8 +353,8 @@ alita_sdk/tools/zephyr_scale/api_wrapper.py,sha256=kT0TbmMvuKhDUZc0i7KO18O38JM9S
 alita_sdk/tools/zephyr_squad/__init__.py,sha256=0ne8XLJEQSLOWfzd2HdnqOYmQlUliKHbBED5kW_Vias,2895
 alita_sdk/tools/zephyr_squad/api_wrapper.py,sha256=kmw_xol8YIYFplBLWTqP_VKPRhL_1ItDD0_vXTe_UuI,14906
 alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py,sha256=R371waHsms4sllHCbijKYs90C-9Yu0sSR3N4SUfQOgU,5066
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
-alita_sdk-0.3.
+alita_sdk-0.3.393.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+alita_sdk-0.3.393.dist-info/METADATA,sha256=EGvHTl8vdFizOdKbiJOld6GmaybBT3SAazljqzu_tOk,19071
+alita_sdk-0.3.393.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+alita_sdk-0.3.393.dist-info/top_level.txt,sha256=0vJYy5p_jK6AwVb1aqXr7Kgqgk3WDtQ6t5C-XI9zkmg,10
+alita_sdk-0.3.393.dist-info/RECORD,,

{alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/WHEEL: File without changes
{alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/licenses/LICENSE: File without changes
{alita_sdk-0.3.391.dist-info → alita_sdk-0.3.393.dist-info}/top_level.txt: File without changes