praisonaiagents 0.0.107__py3-none-any.whl → 0.0.109__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +50 -7
- praisonaiagents/llm/llm.py +17 -15
- praisonaiagents/mcp/mcp.py +39 -1
- praisonaiagents/mcp/mcp_sse.py +41 -1
- {praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
```diff
@@ -364,6 +364,7 @@ class Agent:
         knowledge_config: Optional[Dict[str, Any]] = None,
         use_system_prompt: Optional[bool] = True,
         markdown: bool = True,
+        stream: bool = True,
         self_reflect: bool = False,
         max_reflect: int = 3,
         min_reflect: int = 1,
@@ -372,7 +373,9 @@ class Agent:
         user_id: Optional[str] = None,
         reasoning_steps: bool = False,
         guardrail: Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]] = None,
-        max_guardrail_retries: int = 3
+        max_guardrail_retries: int = 3,
+        base_url: Optional[str] = None,
+        api_key: Optional[str] = None
     ):
         """Initialize an Agent instance.

@@ -435,6 +438,8 @@
                 conversations to establish agent behavior and context. Defaults to True.
             markdown (bool, optional): Enable markdown formatting in agent responses for better
                 readability and structure. Defaults to True.
+            stream (bool, optional): Enable streaming responses from the language model. Set to False
+                for LLM providers that don't support streaming. Defaults to True.
             self_reflect (bool, optional): Enable self-reflection capabilities where the agent
                 evaluates and improves its own responses. Defaults to False.
             max_reflect (int, optional): Maximum number of self-reflection iterations to prevent
@@ -454,6 +459,10 @@
                 description string for LLM-based validation. Defaults to None.
             max_guardrail_retries (int, optional): Maximum number of retry attempts when guardrail
                 validation fails before giving up. Defaults to 3.
+            base_url (Optional[str], optional): Base URL for custom LLM endpoints (e.g., Ollama).
+                If provided, automatically creates a custom LLM instance. Defaults to None.
+            api_key (Optional[str], optional): API key for LLM provider. If not provided,
+                falls back to environment variables. Defaults to None.

         Raises:
             ValueError: If all of name, role, goal, backstory, and instructions are None.
@@ -500,10 +509,40 @@
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False

+        # If base_url is provided, always create a custom LLM instance
+        if base_url:
+            try:
+                from ..llm.llm import LLM
+                # Handle different llm parameter types with base_url
+                if isinstance(llm, dict):
+                    # Merge base_url and api_key into the dict
+                    llm_config = llm.copy()
+                    llm_config['base_url'] = base_url
+                    if api_key:
+                        llm_config['api_key'] = api_key
+                    self.llm_instance = LLM(**llm_config)
+                else:
+                    # Create LLM with model string and base_url
+                    model_name = llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
+                    self.llm_instance = LLM(
+                        model=model_name,
+                        base_url=base_url,
+                        api_key=api_key
+                    )
+                self._using_custom_llm = True
+            except ImportError as e:
+                raise ImportError(
+                    "LLM features requested but dependencies not installed. "
+                    "Please install with: pip install \"praisonaiagents[llm]\""
+                ) from e
         # If the user passes a dictionary (for advanced configuration)
-        if isinstance(llm, dict) and "model" in llm:
+        elif isinstance(llm, dict) and "model" in llm:
             try:
                 from ..llm.llm import LLM
+                # Add api_key if provided and not in dict
+                if api_key and 'api_key' not in llm:
+                    llm = llm.copy()
+                    llm['api_key'] = api_key
                 self.llm_instance = LLM(**llm)  # Pass all dict items as kwargs
                 self._using_custom_llm = True
             except ImportError as e:
@@ -516,7 +555,10 @@
             try:
                 from ..llm.llm import LLM
                 # Pass the entire string so LiteLLM can parse provider/model
-                self.llm_instance = LLM(model=llm)
+                llm_params = {'model': llm}
+                if api_key:
+                    llm_params['api_key'] = api_key
+                self.llm_instance = LLM(**llm_params)
                 self._using_custom_llm = True

             # Ensure tools are properly accessible when using custom LLM
@@ -554,6 +596,7 @@
         self.use_system_prompt = use_system_prompt
         self.chat_history = []
         self.markdown = markdown
+        self.stream = stream
         self.max_reflect = max_reflect
         self.min_reflect = min_reflect
         self.reflect_prompt = reflect_prompt
@@ -1002,7 +1045,7 @@ Your Goal: {self.goal}
                 tools=formatted_tools if formatted_tools else None,
                 verbose=self.verbose,
                 markdown=self.markdown,
-                stream=True,
+                stream=stream,
                 console=self.console,
                 execute_tool_fn=self.execute_tool,
                 agent_name=self.name,
@@ -1018,7 +1061,7 @@ Your Goal: {self.goal}
                 tools=formatted_tools if formatted_tools else None,
                 verbose=self.verbose,
                 markdown=self.markdown,
-                stream=True,
+                stream=stream,
                 console=self.console,
                 execute_tool_fn=self.execute_tool,
                 agent_name=self.name,
@@ -1276,7 +1319,7 @@ Your Goal: {self.goal}
                 agent_tools=agent_tools
             )

-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
            if not response:
                return None

@@ -1371,7 +1414,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

             logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
             messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
-            response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
             response_text = response.choices[0].message.content.strip()
             reflection_count += 1
             continue  # Continue the loop for more reflections
```
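Taken together, the agent.py changes let a caller point an Agent at a custom endpoint directly from the constructor instead of building an LLM instance by hand. A minimal usage sketch: the Ollama URL, model name, and prompt below are illustrative placeholders, not values taken from this diff.

```python
# Hypothetical usage of the constructor parameters added in 0.0.109.
from praisonaiagents import Agent

agent = Agent(
    instructions="You are a helpful assistant.",
    llm="ollama/llama3.2",              # whole string passed through for LiteLLM to parse
    base_url="http://localhost:11434",  # triggers the new custom-LLM branch above
    api_key=None,                       # omitted -> falls back to environment variables
    stream=False,                       # for providers that don't support streaming
)
print(agent.start("Say hello."))
```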
praisonaiagents/llm/llm.py
CHANGED
```diff
@@ -296,6 +296,7 @@ class LLM:
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
         execute_tool_fn: Optional[Callable] = None,
+        stream: bool = True,
         **kwargs
     ) -> str:
         """Enhanced get_response with all OpenAI-like features"""
@@ -487,7 +488,7 @@
                         messages=messages,
                         tools=formatted_tools,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -503,7 +504,7 @@
                         messages=messages,
                         tools=formatted_tools,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -655,7 +656,7 @@
                     **self._build_completion_params(
                         messages=follow_up_messages,
                         temperature=temperature,
-                        stream=True
+                        stream=stream
                     )
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -668,7 +669,7 @@
                     **self._build_completion_params(
                         messages=follow_up_messages,
                         temperature=temperature,
-                        stream=True
+                        stream=stream
                     )
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -755,7 +756,7 @@
                         messages=messages,
                         tools=formatted_tools,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -873,7 +874,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=reflection_messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         response_format={"type": "json_object"},
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -888,7 +889,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=reflection_messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         response_format={"type": "json_object"},
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -1004,6 +1005,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         agent_role: Optional[str] = None,
         agent_tools: Optional[List[str]] = None,
         execute_tool_fn: Optional[Callable] = None,
+        stream: bool = True,
         **kwargs
     ) -> str:
         """Async version of get_response with identical functionality."""
@@ -1204,7 +1206,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -1218,7 +1220,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **kwargs
                     )
                 ):
@@ -1355,7 +1357,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=follow_up_messages,
                         temperature=temperature,
-                        stream=True
+                        stream=stream
                     )
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -1369,7 +1371,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=follow_up_messages,
                         temperature=temperature,
-                        stream=True
+                        stream=stream
                     )
                 ):
                     if chunk and chunk.choices and chunk.choices[0].delta.content:
@@ -1437,7 +1439,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                    **self._build_completion_params(
                        messages=messages,
                        temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         tools=formatted_tools,
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -1453,7 +1455,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
                 ):
@@ -1534,7 +1536,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=reflection_messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         response_format={"type": "json_object"},
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
@@ -1549,7 +1551,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     **self._build_completion_params(
                         messages=reflection_messages,
                         temperature=temperature,
-                        stream=True,
+                        stream=stream,
                         response_format={"type": "json_object"},
                         **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
                     )
```
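The llm.py edits are mechanical: every completion call that previously hardcoded `stream=True` now forwards the caller's flag through `_build_completion_params`. A standalone sketch of the pattern, using stand-in names (`build_params`, `mock_completion`) rather than the library's actual internals:

```python
# Sketch of threading a caller-supplied stream flag into completion params
# instead of hardcoding it. These functions are illustrative stand-ins.

def build_params(messages, temperature, stream, **kwargs):
    # Mirrors the role of _build_completion_params: one dict for the backend.
    return {"messages": messages, "temperature": temperature,
            "stream": stream, **kwargs}

def mock_completion(messages, temperature, stream):
    text = "Hello from the model"
    if stream:
        # Streaming backends yield chunks; callers iterate and accumulate.
        return iter(text.split())
    return text  # Non-streaming backends return one complete response.

params = build_params([{"role": "user", "content": "Hi"}], 0.7, stream=False)
print(mock_completion(**params))  # one complete string, no chunk loop needed
```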
praisonaiagents/mcp/mcp.py
CHANGED
```diff
@@ -379,6 +379,43 @@ class MCP:
         """
         return iter(self._tools)

+    def _fix_array_schemas(self, schema):
+        """
+        Fix array schemas by adding missing 'items' attribute required by OpenAI.
+
+        This ensures compatibility with OpenAI's function calling format which
+        requires array types to specify the type of items they contain.
+
+        Args:
+            schema: The schema dictionary to fix
+
+        Returns:
+            dict: The fixed schema
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        # Create a copy to avoid modifying the original
+        fixed_schema = schema.copy()
+
+        # Fix array types at the current level
+        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+            # Add a default items schema for arrays without it
+            fixed_schema["items"] = {"type": "string"}
+
+        # Recursively fix nested schemas
+        if "properties" in fixed_schema:
+            fixed_properties = {}
+            for prop_name, prop_schema in fixed_schema["properties"].items():
+                fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+            fixed_schema["properties"] = fixed_properties
+
+        # Fix items schema if it exists
+        if "items" in fixed_schema:
+            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+        return fixed_schema
+
     def to_openai_tool(self):
         """Convert the MCP tool to an OpenAI-compatible tool definition.

@@ -404,7 +441,8 @@ class MCP:
         # Create OpenAI tool definition
         parameters = {}
         if hasattr(tool, 'inputSchema') and tool.inputSchema:
-            parameters = tool.inputSchema
+            # Fix array schemas to include 'items' attribute
+            parameters = self._fix_array_schemas(tool.inputSchema)
         else:
             # Create a minimal schema if none exists
             parameters = {
```
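To make the effect of `_fix_array_schemas` concrete, here is the same logic as a standalone function, applied to an invented input schema that declares an array property without `items` (the form OpenAI's function-calling API rejects):

```python
# Standalone version of the _fix_array_schemas logic shown above; the sample
# schema is invented for illustration.

def fix_array_schemas(schema):
    if not isinstance(schema, dict):
        return schema
    fixed = schema.copy()  # avoid mutating the caller's schema
    if fixed.get("type") == "array" and "items" not in fixed:
        fixed["items"] = {"type": "string"}  # default element type
    if "properties" in fixed:
        fixed["properties"] = {k: fix_array_schemas(v)
                               for k, v in fixed["properties"].items()}
    if "items" in fixed:
        fixed["items"] = fix_array_schemas(fixed["items"])
    return fixed

schema = {
    "type": "object",
    "properties": {
        "tags": {"type": "array"},  # invalid for OpenAI: array without `items`
    },
}
print(fix_array_schemas(schema))
# {'type': 'object', 'properties': {'tags': {'type': 'array', 'items': {'type': 'string'}}}}
```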
praisonaiagents/mcp/mcp_sse.py
CHANGED
```diff
@@ -88,14 +88,54 @@ class SSEMCPTool:
             logger.error(f"Error in _async_call for {self.name}: {e}")
             raise

+    def _fix_array_schemas(self, schema):
+        """
+        Fix array schemas by adding missing 'items' attribute required by OpenAI.
+
+        This ensures compatibility with OpenAI's function calling format which
+        requires array types to specify the type of items they contain.
+
+        Args:
+            schema: The schema dictionary to fix
+
+        Returns:
+            dict: The fixed schema
+        """
+        if not isinstance(schema, dict):
+            return schema
+
+        # Create a copy to avoid modifying the original
+        fixed_schema = schema.copy()
+
+        # Fix array types at the current level
+        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
+            # Add a default items schema for arrays without it
+            fixed_schema["items"] = {"type": "string"}
+
+        # Recursively fix nested schemas
+        if "properties" in fixed_schema:
+            fixed_properties = {}
+            for prop_name, prop_schema in fixed_schema["properties"].items():
+                fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
+            fixed_schema["properties"] = fixed_properties
+
+        # Fix items schema if it exists
+        if "items" in fixed_schema:
+            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])
+
+        return fixed_schema
+
     def to_openai_tool(self):
         """Convert the tool to OpenAI format."""
+        # Fix array schemas to include 'items' attribute
+        fixed_schema = self._fix_array_schemas(self.input_schema)
+
         return {
             "type": "function",
             "function": {
                 "name": self.name,
                 "description": self.description,
-                "parameters": self.input_schema
+                "parameters": fixed_schema
             }
         }
```
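The net effect on `to_openai_tool` output is that every array parameter in the emitted definition now carries an `items` schema. An illustrative example of the resulting shape; the tool name and fields are invented, not taken from the package:

```python
import json

# Example of the tool definition an SSEMCPTool would now emit when its input
# schema declared an array without `items`; all field values are invented.
tool_def = {
    "type": "function",
    "function": {
        "name": "search_docs",
        "description": "Search documentation pages",
        "parameters": {
            "type": "object",
            "properties": {
                # Before the fix this stayed {"type": "array"}, which OpenAI rejects.
                "queries": {"type": "array", "items": {"type": "string"}},
            },
            "required": ["queries"],
        },
    },
}
print(json.dumps(tool_def, indent=2))
```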
{praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/RECORD
CHANGED
```diff
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=VILZRrFbUJFF_gtATMfx-fpqoBMMwyiRmvBWo9592Ds,112093
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
@@ -15,10 +15,10 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=bSywIHBHH0YUf4hSx-FmFXkRv2g1Rlhuk-gjoImE8j8,925
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=NfsJNSScR_kS2sLeU1Ah41IXYN804cOQEMuxpt59zuM,104505
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
-praisonaiagents/mcp/mcp.py,sha256=
-praisonaiagents/mcp/mcp_sse.py,sha256=
+praisonaiagents/mcp/mcp.py,sha256=qr9xbTfM3V6ZQgs3o9mGv6pDiJPnfbI24nUK_vUnGOI,18771
+praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYSY,8357
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=eYXVvuXrvt4LaEJ-AAbAiwpFUCuS5LH5F7Z0cBW5_gQ,42186
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
@@ -51,7 +51,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.107.dist-info/METADATA,sha256=
-praisonaiagents-0.0.107.dist-info/WHEEL,sha256=
-praisonaiagents-0.0.107.dist-info/top_level.txt,sha256=
-praisonaiagents-0.0.107.dist-info/RECORD,,
+praisonaiagents-0.0.109.dist-info/METADATA,sha256=978n4Xm55N1ZwIQYE4vtLBVqsIuCylWFOEW1bWZTa6g,1669
+praisonaiagents-0.0.109.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.109.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.109.dist-info/RECORD,,
```
{praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.107.dist-info → praisonaiagents-0.0.109.dist-info}/top_level.txt
File without changes