xgae 0.2.0.tar.gz → 0.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae might be problematic.

Files changed (49)
  1. {xgae-0.2.0 → xgae-0.2.1}/.env +1 -0
  2. {xgae-0.2.0 → xgae-0.2.1}/CHANGELOG.md +6 -0
  3. {xgae-0.2.0 → xgae-0.2.1}/PKG-INFO +6 -2
  4. {xgae-0.2.0 → xgae-0.2.1}/pyproject.toml +9 -2
  5. xgae-0.2.1/src/examples/agent/langgraph/react/custom_prompt_rag.py +68 -0
  6. {xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/react_agent.py +9 -4
  7. {xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/result_eval_agent.py +1 -0
  8. {xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/run_react_agent.py +1 -1
  9. {xgae-0.2.0 → xgae-0.2.1}/src/examples/engine/run_custom_and_agent_tools.py +1 -1
  10. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/llm_client.py +1 -1
  11. xgae-0.2.1/uv.lock +2556 -0
  12. xgae-0.2.0/uv.lock +0 -1463
  13. {xgae-0.2.0 → xgae-0.2.1}/.python-version +0 -0
  14. {xgae-0.2.0 → xgae-0.2.1}/README.md +0 -0
  15. {xgae-0.2.0 → xgae-0.2.1}/mcpservers/custom_servers.json +0 -0
  16. {xgae-0.2.0 → xgae-0.2.1}/mcpservers/xga_server.json +0 -0
  17. {xgae-0.2.0 → xgae-0.2.1}/mcpservers/xga_server_sse.json +0 -0
  18. {xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/agent_base.py +0 -0
  19. {xgae-0.2.0 → xgae-0.2.1}/src/examples/engine/run_general_tools.py +0 -0
  20. {xgae-0.2.0 → xgae-0.2.1}/src/examples/engine/run_human_in_loop.py +0 -0
  21. {xgae-0.2.0 → xgae-0.2.1}/src/examples/engine/run_simple.py +0 -0
  22. {xgae-0.2.0 → xgae-0.2.1}/src/examples/tools/custom_fault_tools_app.py +0 -0
  23. {xgae-0.2.0 → xgae-0.2.1}/src/examples/tools/simu_a2a_tools_app.py +0 -0
  24. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/__init__.py +0 -0
  25. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/engine_base.py +0 -0
  26. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/mcp_tool_box.py +0 -0
  27. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/prompt_builder.py +0 -0
  28. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/responser/non_stream_responser.py +0 -0
  29. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/responser/responser_base.py +0 -0
  30. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/responser/stream_responser.py +0 -0
  31. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/task_engine.py +0 -0
  32. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine/task_langfuse.py +0 -0
  33. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/engine_cli_app.py +0 -0
  34. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/tools/without_general_tools_app.py +0 -0
  35. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/__init__.py +0 -0
  36. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/json_helpers.py +0 -0
  37. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/misc.py +0 -0
  38. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/setup_env.py +0 -0
  39. {xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/xml_tool_parser.py +0 -0
  40. {xgae-0.2.0 → xgae-0.2.1}/templates/agent_tool_prompt_template.txt +0 -0
  41. {xgae-0.2.0 → xgae-0.2.1}/templates/custom_tool_prompt_template.txt +0 -0
  42. {xgae-0.2.0 → xgae-0.2.1}/templates/example/fault_user_prompt.txt +0 -0
  43. {xgae-0.2.0 → xgae-0.2.1}/templates/example/result_eval_template.txt +0 -0
  44. {xgae-0.2.0 → xgae-0.2.1}/templates/gemini_system_prompt_template.txt +0 -0
  45. {xgae-0.2.0 → xgae-0.2.1}/templates/general_tool_prompt_template.txt +0 -0
  46. {xgae-0.2.0 → xgae-0.2.1}/templates/system_prompt_response_sample.txt +0 -0
  47. {xgae-0.2.0 → xgae-0.2.1}/templates/system_prompt_template.txt +0 -0
  48. {xgae-0.2.0 → xgae-0.2.1}/test/test_langfuse.py +0 -0
  49. {xgae-0.2.0 → xgae-0.2.1}/test/test_litellm_langfuse.py +0 -0
{xgae-0.2.0 → xgae-0.2.1}/.env
@@ -12,6 +12,7 @@ LANGFUSE_HOST=https://cloud.langfuse.com
 
 # LLM
 LLM_MODEL=openai/qwen3-235b-a22b
+#LLM_MODEL=openai/qwen3-4b
 LLM_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
 LLM_API_KEY=
 LLM_MAX_TOKENS=16384
{xgae-0.2.0 → xgae-0.2.1}/CHANGELOG.md
@@ -1,4 +1,10 @@
 # Release Changelog
+## [0.2.1] - 2025-9-17
+### Added
+- Example Langgraph ReactAgent: add Chromadb for custom prompt RAG
+### Modified
+- pyproject.toml: add [project.optional-dependencies] 'examples'
+
 ## [0.2.0] - 2025-9-10
 ### Added
 - Agent Engine release 0.2
{xgae-0.2.0 → xgae-0.2.1}/PKG-INFO
@@ -1,12 +1,16 @@
 Metadata-Version: 2.4
 Name: xgae
-Version: 0.2.0
+Version: 0.2.1
 Summary: Extreme General Agent Engine
 Requires-Python: >=3.13
 Requires-Dist: colorlog==6.9.0
 Requires-Dist: langchain-mcp-adapters==0.1.9
 Requires-Dist: langchain==0.3.27
 Requires-Dist: langfuse==2.60.9
-Requires-Dist: langgraph==0.6.5
 Requires-Dist: litellm==1.74.15
 Requires-Dist: mcp==1.13.0
+Provides-Extra: examples
+Requires-Dist: chromadb==1.1.0; extra == 'examples'
+Requires-Dist: fastembed==0.7.3; extra == 'examples'
+Requires-Dist: langchain-community==0.3.29; extra == 'examples'
+Requires-Dist: langgraph==0.6.5; extra == 'examples'
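
Note that langgraph==0.6.5 is not dropped from the package: it moves from the unconditional requirements into the new 'examples' extra, so a plain pip install xgae no longer installs langgraph; chromadb, fastembed, and langchain-community are likewise only pulled in through that extra.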
{xgae-0.2.0 → xgae-0.2.1}/pyproject.toml
@@ -1,19 +1,26 @@
 [project]
 name = "xgae"
-version = "0.2.0"
+version = "0.2.1"
 description = "Extreme General Agent Engine"
 readme = "README.md"
 requires-python = ">=3.13"
 dependencies = [
     "colorlog==6.9.0",
     "langchain-mcp-adapters==0.1.9",
-    "langgraph==0.6.5",
     "litellm==1.74.15",
     "mcp==1.13.0",
     "langfuse==2.60.9",
     "langchain==0.3.27",
 ]
 
+[project.optional-dependencies]
+examples = [
+    "langgraph==0.6.5",
+    "langchain-community==0.3.29",
+    "fastembed==0.7.3",
+    "chromadb==1.1.0",
+]
+
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
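
Consumers who want to run the RAG example opt in explicitly, e.g. pip install "xgae[examples]" from the registry, or uv sync --extra examples inside the checkout (the repository ships a uv.lock); a bare install keeps the engine's dependency footprint unchanged apart from the langgraph removal.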
xgae-0.2.1/src/examples/agent/langgraph/react/custom_prompt_rag.py
@@ -0,0 +1,68 @@
+import logging
+import os
+from typing import List, Optional
+
+os.environ['HF_ENDPOINT']="https://hf-mirror.com"
+os.environ['TOKENIZERS_PARALLELISM']="False"
+
+from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
+from langchain_community.vectorstores import Chroma
+from langchain_core.documents import Document
+
+class CustomPromptRag:
+    PROMPT_SIMILAR_SCORE = 0.8
+
+    def __init__(self, emb_model_name: Optional[str] = "BAAI/bge-small-zh-v1.5"):
+        self.emb_model_name = emb_model_name
+        prompt_docs = []
+        # should read from DB, load all custom prompt or COT
+        prompt_docs.append(self._create_prompt_doc(
+            prompt_summary="Fault location and analysis of fault causes",
+            prompt_path="templates/example/fault_user_prompt.txt"
+        ))
+        self.vector_store = self._init_vector_store(prompt_docs)
+
+
+    def _create_prompt_doc(self, prompt_summary: str, prompt_path: str)-> Document:
+        return Document(
+            page_content=prompt_summary,
+            metadata={
+                "source": prompt_path,
+            }
+        )
+
+
+    def _init_vector_store(self, docs: List[Document]) -> Chroma:
+        # FastEmbedEmbeddings first time will download BAAI/bge-small-zh-v1.5 embedding model from HF
+        embeddings = FastEmbedEmbeddings(model_name=self.emb_model_name)
+        return Chroma.from_documents(documents=docs, embedding=embeddings, persist_directory=None)
+
+
+    def search_prompt(self, query:str)-> str | None:
+        prompt_path = None
+        results = self.vector_store.similarity_search_with_score(query=query, k=1)
+        if results and len(results) > 0:
+            doc, score = results[0]
+            if score > self.PROMPT_SIMILAR_SCORE:
+                logging.info(f"CustomPromptRag search: SIMILAR_SCORE: {score} > {self.PROMPT_SIMILAR_SCORE}, "
+                             f"\nquery: '{query}' \nprompt_summary: '{doc.page_content}'\n")
+            else:
+                prompt_path = doc.metadata['source']
+                logging.info(f"CustomPromptRag search: SIMILAR_SCORE: {score}, prompt_path: '{prompt_path}'")
+
+        return prompt_path
+
+if __name__ == "__main__":
+    from xgae.utils.setup_env import setup_logging
+
+    setup_logging()
+
+    custom_prompt_rag = CustomPromptRag()
+
+    querys = ["locate 10.2.3.4 fault and solution",
+              "5+7"]
+
+    for query in querys:
+        logging.info("*"*50)
+        logging.info(f"query: '{query}'")
+        custom_prompt_rag.search_prompt(query)
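
One detail reviewers may trip over: with the pinned langchain-community Chroma wrapper, similarity_search_with_score returns (Document, score) pairs where the score is a distance, so lower means more similar. That is why search_prompt treats score > PROMPT_SIMILAR_SCORE as a miss and only returns the prompt path below the threshold. A minimal standalone sketch of the same lookup (not part of the package; assumes the 'examples' extra is installed and the embedding model can be downloaded):

from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

THRESHOLD = 0.8  # mirrors CustomPromptRag.PROMPT_SIMILAR_SCORE

# One summary document standing in for the prompt catalog.
docs = [Document(page_content="Fault location and analysis of fault causes",
                 metadata={"source": "templates/example/fault_user_prompt.txt"})]
store = Chroma.from_documents(docs, FastEmbedEmbeddings(model_name="BAAI/bge-small-zh-v1.5"))

# similarity_search_with_score returns (Document, distance); LOWER distance = closer match.
doc, distance = store.similarity_search_with_score("locate 10.2.3.4 fault and solution", k=1)[0]
print("hit" if distance <= THRESHOLD else "miss", round(distance, 3), doc.metadata["source"])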
{xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/react_agent.py
@@ -22,12 +22,13 @@ from xgae.engine.task_engine import XGATaskEngine
 
 from examples.agent.langgraph.react.agent_base import AgentContext, TaskState, EvaluateResult
 from examples.agent.langgraph.react.result_eval_agent import TaskResultEvalAgent
+from examples.agent.langgraph.react.custom_prompt_rag import CustomPromptRag
 
 class XGAReactAgent:
     MAX_TASK_RETRY = 2
     QUALIFIED_RESULT_SCORE = 0.7
 
-    def __init__(self):
+    def __init__(self, use_prompt_rag: Optional[bool] = False):
         self.graph = None
 
         self.graph_config = None
@@ -37,6 +38,8 @@ class XGAReactAgent:
         self.tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
         self.result_eval_agent = TaskResultEvalAgent()
 
+        if use_prompt_rag:
+            self.custom_prompt_rag = CustomPromptRag()
 
     async def _create_graph(self) -> StateGraph:
         try:
@@ -86,8 +89,10 @@ class XGAReactAgent:
 
 
     def _search_system_prompt(self, user_input: str) -> str:
-        # You should search RAG use user_input, fetch COT or Prompt for your business
-        system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
+        if hasattr(self, 'custom_prompt_rag'):
+            system_prompt = self.custom_prompt_rag.search_prompt(user_input)
+        else:
+            system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
         return system_prompt
 
 
@@ -145,7 +150,7 @@
 
 
     def _select_custom_tools(self, system_prompt: str) -> list[str]:
-        # @todo select mcp tool based on system prompt in future
+        # @todo search mcp tool based on system prompt or user_input in future
        custom_tools = ["*"] if system_prompt else []
        return custom_tools
 
{xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/result_eval_agent.py
@@ -27,6 +27,7 @@ class TaskResultEvalAgent:
 
         langfuse_metadata = self._create_llm_langfuse_meta(trace_id, session_id)
 
+        logging.info("TaskResultEvalAgent: LLM Eval result.....")
         response = await self.model_client.acompletion(messages, langfuse_metadata)
         response_text = await self.model_client.get_response_result(response)
 
{xgae-0.2.0 → xgae-0.2.1}/src/examples/agent/langgraph/react/run_react_agent.py
@@ -17,7 +17,7 @@ async def main():
     ]
 
     for user_input in user_inputs:
-        agent = XGAReactAgent()
+        agent = XGAReactAgent(use_prompt_rag=False)
         task_no += 1
         context: AgentContext = {
             'task_id': f"agent_task_{uuid4()}", # can be set with request_id, must be unique
{xgae-0.2.0 → xgae-0.2.1}/src/examples/engine/run_custom_and_agent_tools.py
@@ -14,7 +14,7 @@ else:
 setup_logging()
 
 # Before Run Exec: uv run example-fault-tools --alarmtype=2 , uv run example-a2a-tools
-# If want to use real A2A agent tool, use xga-agent-tool project
+# If want to use real A2A agent tool, use xgaproxy project, uv run xga-a2a-proxy & uv run example-a2a-server
 
 async def main() -> None:
     tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
{xgae-0.2.0 → xgae-0.2.1}/src/xgae/utils/llm_client.py
@@ -56,7 +56,7 @@ class LLMClient:
         env_llm_model = os.getenv('LLM_MODEL', "openai/qwen3-235b-a22b")
         env_llm_api_key = os.getenv('LLM_API_KEY')
         env_llm_api_base = os.getenv('LLM_API_BASE', "https://dashscope.aliyuncs.com/compatible-mode/v1")
-        env_llm_max_tokens = int(os.getenv('LLM_MAX_TOKENS', 16384))
+        env_llm_max_tokens = int(os.getenv('LLM_MAX_TOKENS', 8192))
         env_llm_temperature = float(os.getenv('LLM_TEMPERATURE', 0.7))
         env_llm_stream = to_bool(os.getenv('LLM_STREAM', False))
         env_llm_enable_thinking = to_bool(os.getenv('LLM_ENABLE_THINKING', False))
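
This change only lowers the fallback used when LLM_MAX_TOKENS is unset; the bundled .env still sets LLM_MAX_TOKENS=16384 (see the first diff above), so runs that load that file keep the old limit.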