dao-ai 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dao_ai/graph.py CHANGED
@@ -219,7 +219,9 @@ def _create_swarm_graph(config: AppConfig) -> CompiledStateGraph:
 
     workflow.set_entry_point("message_hook")
 
-    return workflow.compile(checkpointer=checkpointer, store=store)
+    return swarm_node
+
+    # return workflow.compile(checkpointer=checkpointer, store=store)
 
 
 def create_dao_ai_graph(config: AppConfig) -> CompiledStateGraph:
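With this change `_create_swarm_graph` returns `swarm_node` directly and only keeps the `workflow.compile(...)` call as a comment, which leaves `workflow.set_entry_point("message_hook")` with no effect. For the `CompiledStateGraph` return annotation to still hold, `swarm_node` must itself already be compiled. A minimal sketch of that assumption — the `create_swarm` construction below is hypothetical and does not appear in this diff:

```python
# Hypothetical construction consistent with the new return value; assumes
# langgraph_swarm is in use and swarm_node is compiled before being returned.
from langgraph_swarm import create_swarm

swarm_node = create_swarm(
    agents=agents,  # assumed: agent graphs built earlier in _create_swarm_graph
    default_active_agent=agents[0].name,
).compile(checkpointer=checkpointer, store=store)
```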
dao_ai/models.py CHANGED
@@ -65,9 +65,19 @@ class LanggraphChatModel(ChatModel):
         context: Context = self._convert_to_context(params)
         custom_inputs: dict[str, Any] = {"configurable": context.model_dump()}
 
-        response: dict[str, Sequence[BaseMessage]] = self.graph.invoke(
-            request, context=context, config=custom_inputs
+        # Use async ainvoke internally for parallel execution
+        import asyncio
+
+        async def _async_invoke():
+            return await self.graph.ainvoke(
+                request, context=context, config=custom_inputs
+            )
+
+        loop = asyncio.get_event_loop()
+        response: dict[str, Sequence[BaseMessage]] = loop.run_until_complete(
+            _async_invoke()
         )
+
         logger.trace(f"response: {response}")
 
         last_message: BaseMessage = response["messages"][-1]
@@ -114,33 +124,51 @@ class LanggraphChatModel(ChatModel):
         context: Context = self._convert_to_context(params)
         custom_inputs: dict[str, Any] = {"configurable": context.model_dump()}
 
-        for nodes, stream_mode, messages_batch in self.graph.stream(
-            request,
-            context=context,
-            config=custom_inputs,
-            stream_mode=["messages", "custom"],
-            subgraphs=True,
-        ):
-            nodes: tuple[str, ...]
-            stream_mode: str
-            messages_batch: Sequence[BaseMessage]
-            logger.trace(
-                f"nodes: {nodes}, stream_mode: {stream_mode}, messages: {messages_batch}"
-            )
-            for message in messages_batch:
-                if (
-                    isinstance(
-                        message,
-                        (
-                            AIMessageChunk,
-                            AIMessage,
-                        ),
-                    )
-                    and message.content
-                    and "summarization" not in nodes
-                ):
-                    content = message.content
-                    yield self._create_chat_completion_chunk(content)
+        # Use async astream internally for parallel execution
+        import asyncio
+
+        async def _async_stream():
+            async for nodes, stream_mode, messages_batch in self.graph.astream(
+                request,
+                context=context,
+                config=custom_inputs,
+                stream_mode=["messages", "custom"],
+                subgraphs=True,
+            ):
+                nodes: tuple[str, ...]
+                stream_mode: str
+                messages_batch: Sequence[BaseMessage]
+                logger.trace(
+                    f"nodes: {nodes}, stream_mode: {stream_mode}, messages: {messages_batch}"
+                )
+                for message in messages_batch:
+                    if (
+                        isinstance(
+                            message,
+                            (
+                                AIMessageChunk,
+                                AIMessage,
+                            ),
+                        )
+                        and message.content
+                        and "summarization" not in nodes
+                    ):
+                        content = message.content
+                        yield self._create_chat_completion_chunk(content)
+
+        # Convert async generator to sync generator
+        loop = asyncio.get_event_loop()
+        async_gen = _async_stream()
+
+        try:
+            while True:
+                try:
+                    item = loop.run_until_complete(async_gen.__anext__())
+                    yield item
+                except StopAsyncIteration:
+                    break
+        finally:
+            loop.run_until_complete(async_gen.aclose())
 
     def _create_chat_completion_chunk(self, content: str) -> ChatCompletionChunk:
         return ChatCompletionChunk(
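The `__anext__`/`StopAsyncIteration` pump above generalizes into a small helper; the `aclose()` in the `finally` block matters because it lets the async generator run its own cleanup (its `finally` and `async with` exits) even when the sync consumer stops early. A sketch under the same no-running-loop assumption — `iter_sync` is an illustrative name:

```python
import asyncio
from typing import AsyncGenerator, Iterator, TypeVar

T = TypeVar("T")


def iter_sync(agen: AsyncGenerator[T, None], loop: asyncio.AbstractEventLoop) -> Iterator[T]:
    # Pump an async generator from sync code, one item per run_until_complete().
    try:
        while True:
            try:
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        # Ensure the generator's finally/async-with blocks execute.
        loop.run_until_complete(agen.aclose())
```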
@@ -178,9 +206,18 @@ def _process_langchain_messages(
     messages: Sequence[BaseMessage],
     custom_inputs: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any] | Any:
+    """Process LangChain messages using async LangGraph calls internally."""
+    import asyncio
+
     if isinstance(app, LanggraphChatModel):
         app = app.graph
-    return app.invoke({"messages": messages}, config=custom_inputs)
+
+    # Use async ainvoke internally for parallel execution
+    async def _async_invoke():
+        return await app.ainvoke({"messages": messages}, config=custom_inputs)
+
+    loop = asyncio.get_event_loop()
+    return loop.run_until_complete(_async_invoke())
 
 
 def _process_langchain_messages_stream(
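One caveat with this bridge: if `_process_langchain_messages` is ever called from code that is itself inside a running event loop (an async web handler, a notebook with a live loop), `run_until_complete()` raises `RuntimeError: This event loop is already running`. A hedged guard, illustrative and not part of the package:

```python
import asyncio


def ensure_no_running_loop() -> None:
    # Fail fast with a clear message instead of the generic
    # "This event loop is already running" from run_until_complete().
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return  # no loop running; the sync bridge is safe
    raise RuntimeError(
        "called from a running event loop; await app.ainvoke(...) directly"
    )
```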
@@ -188,6 +225,9 @@ def _process_langchain_messages_stream(
     messages: Sequence[BaseMessage],
     custom_inputs: Optional[dict[str, Any]] = None,
 ) -> Generator[AIMessageChunk, None, None]:
+    """Process LangChain messages in streaming mode using async LangGraph calls internally."""
+    import asyncio
+
     if isinstance(app, LanggraphChatModel):
         app = app.graph
 
@@ -196,32 +236,48 @@ def _process_langchain_messages_stream(
         custom_inputs = custom_inputs.get("configurable", custom_inputs or {})
     context: Context = Context(**custom_inputs)
 
-    for nodes, stream_mode, messages in app.stream(
-        {"messages": messages},
-        context=context,
-        config=custom_inputs,
-        stream_mode=["messages", "custom"],
-        subgraphs=True,
-    ):
-        nodes: tuple[str, ...]
-        stream_mode: str
-        messages: Sequence[BaseMessage]
-        logger.trace(
-            f"nodes: {nodes}, stream_mode: {stream_mode}, messages: {messages}"
-        )
-        for message in messages:
-            if (
-                isinstance(
-                    message,
-                    (
-                        AIMessageChunk,
-                        AIMessage,
-                    ),
-                )
-                and message.content
-                and "summarization" not in nodes
-            ):
-                yield message
+    # Use async astream internally for parallel execution
+    async def _async_stream():
+        async for nodes, stream_mode, stream_messages in app.astream(
+            {"messages": messages},
+            context=context,
+            config=custom_inputs,
+            stream_mode=["messages", "custom"],
+            subgraphs=True,
+        ):
+            nodes: tuple[str, ...]
+            stream_mode: str
+            stream_messages: Sequence[BaseMessage]
+            logger.trace(
+                f"nodes: {nodes}, stream_mode: {stream_mode}, messages: {stream_messages}"
+            )
+            for message in stream_messages:
+                if (
+                    isinstance(
+                        message,
+                        (
+                            AIMessageChunk,
+                            AIMessage,
+                        ),
+                    )
+                    and message.content
+                    and "summarization" not in nodes
+                ):
+                    yield message
+
+    # Convert async generator to sync generator
+    loop = asyncio.get_event_loop()
+    async_gen = _async_stream()
+
+    try:
+        while True:
+            try:
+                item = loop.run_until_complete(async_gen.__anext__())
+                yield item
+            except StopAsyncIteration:
+                break
+    finally:
+        loop.run_until_complete(async_gen.aclose())
 
 
 def _process_mlflow_messages(
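For reference, a hedged usage sketch of the resulting sync generator (the message construction here is illustrative):

```python
from langchain_core.messages import HumanMessage

# Chunks arrive as AIMessageChunk/AIMessage; summarization-node output is filtered out.
for chunk in _process_langchain_messages_stream(app, [HumanMessage(content="hello")]):
    print(chunk.content, end="", flush=True)
```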
@@ -272,7 +328,7 @@ def process_messages_stream(
     Process messages through a ChatAgent in streaming mode.
 
     Utility function that normalizes message input formats and
-    streams the agent's responses as they're generated.
+    streams the agent's responses as they're generated using async LangGraph calls internally.
 
     Args:
         app: The ChatAgent to process messages with
@@ -302,7 +358,7 @@ def process_messages(
     Process messages through a ChatAgent in batch mode.
 
     Utility function that normalizes message input formats and
-    returns the complete response from the agent.
+    returns the complete response from the agent using async LangGraph calls internally.
 
     Args:
         app: The ChatAgent to process messages with
dao_ai/nodes.py CHANGED
@@ -69,7 +69,7 @@ def summarization_node(app_model: AppModel) -> RunnableLike:
 
 
 def call_agent_with_summarized_messages(agent: CompiledStateGraph) -> RunnableLike:
-    def call_agent(state: SharedState, runtime: Runtime[Context]) -> SharedState:
+    async def call_agent(state: SharedState, runtime: Runtime[Context]) -> SharedState:
         logger.debug(f"Calling agent {agent.name} with summarized messages")
 
         # Get the summarized messages from the summarization node
@@ -81,7 +81,9 @@ def call_agent_with_summarized_messages(agent: CompiledStateGraph) -> RunnableLi
             "messages": messages,
         }
 
-        response: dict[str, Any] = agent.invoke(input=input, context=runtime.context)
+        response: dict[str, Any] = await agent.ainvoke(
+            input=input, context=runtime.context
+        )
         response_messages = response.get("messages", [])
         logger.debug(f"Agent returned {len(response_messages)} messages")
 
@@ -193,7 +195,9 @@ def message_hook_node(config: AppConfig) -> RunnableLike:
     message_hooks: Sequence[Callable[..., Any]] = create_hooks(config.app.message_hooks)
 
     @mlflow.trace()
-    def message_hook(state: IncomingState, runtime: Runtime[Context]) -> SharedState:
+    async def message_hook(
+        state: IncomingState, runtime: Runtime[Context]
+    ) -> SharedState:
         logger.debug("Running message validation")
         response: dict[str, Any] = {"is_valid": True, "message_error": None}
 
dao_ai/tools/agent.py CHANGED
@@ -40,16 +40,16 @@ def create_agent_endpoint_tool(
 
     doc: str = description + "\n" + doc_signature
 
-    def agent_endpoint(prompt: str) -> AIMessage:
+    async def agent_endpoint(prompt: str) -> AIMessage:
         model: LanguageModelLike = llm.as_chat_model()
         messages: Sequence[BaseMessage] = [HumanMessage(content=prompt)]
-        response: AIMessage = model.invoke(messages)
+        response: AIMessage = await model.ainvoke(messages)
         return response
 
     name: str = name if name else agent_endpoint.__name__
 
     structured_tool: StructuredTool = StructuredTool.from_function(
-        func=agent_endpoint, name=name, description=doc, parse_docstring=False
+        coroutine=agent_endpoint, name=name, description=doc, parse_docstring=False
     )
 
     return structured_tool
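Passing the function as `coroutine=` rather than `func=` registers it on the async path only: the tool serves `ainvoke`, while plain `invoke` has no sync implementation to fall back on. A hedged sketch of that split (names are illustrative; the sync-call failure reflects current langchain-core behavior as I understand it):

```python
import asyncio

from langchain_core.tools import StructuredTool


async def echo(prompt: str) -> str:
    """Echo the prompt back."""
    return prompt


tool = StructuredTool.from_function(
    coroutine=echo, name="echo", description="Echo the prompt back."
)

print(asyncio.run(tool.ainvoke({"prompt": "hi"})))  # -> "hi"
# tool.invoke({"prompt": "hi"}) would raise NotImplementedError: no sync func was given.
```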
dao_ai/tools/genie.py CHANGED
@@ -62,14 +62,16 @@ def create_genie_tool(
 
     doc: str = description + "\n" + doc_signature
 
-    def genie_tool(question: str) -> GenieResponse:
+    async def genie_tool(question: str) -> GenieResponse:
+        # Use sync API for now since Genie doesn't support async yet
+        # Can be easily updated to await when Genie gets async support
         response: GenieResponse = genie.ask_question(question)
         return response
 
     name: str = name if name else genie_tool.__name__
 
     structured_tool: StructuredTool = StructuredTool.from_function(
-        func=genie_tool, name=name, description=doc, parse_docstring=False
+        coroutine=genie_tool, name=name, description=doc, parse_docstring=False
    )
 
     return structured_tool
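As the new comments note, `genie.ask_question` remains a blocking call, so this coroutine stalls the event loop for the duration of the request. Until Genie grows a native async API, one hedged interim option is to push the blocking call onto a worker thread — a sketch, assuming `ask_question` is thread-safe:

```python
import asyncio


async def genie_tool(question: str) -> GenieResponse:
    # Run the blocking SDK call in a thread so other graph branches keep running.
    return await asyncio.to_thread(genie.ask_question, question)
```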
dao_ai/tools/human_in_the_loop.py CHANGED
@@ -50,7 +50,7 @@ def add_human_in_the_loop(
     logger.debug(f"Wrapping tool {tool} with human-in-the-loop functionality")
 
     @create_tool(tool.name, description=tool.description, args_schema=tool.args_schema)
-    def call_tool_with_interrupt(config: RunnableConfig, **tool_input) -> Any:
+    async def call_tool_with_interrupt(config: RunnableConfig, **tool_input) -> Any:
         logger.debug(f"call_tool_with_interrupt: {tool.name} with input: {tool_input}")
         request: HumanInterrupt = {
             "action_request": {
@@ -66,10 +66,10 @@ def add_human_in_the_loop(
         logger.debug(f"Human interrupt response: {response}")
 
         if response["type"] == "accept":
-            tool_response = tool.invoke(tool_input, config=config)
+            tool_response = await tool.ainvoke(tool_input, config=config)
         elif response["type"] == "edit":
             tool_input = response["args"]["args"]
-            tool_response = tool.invoke(tool_input, config=config)
+            tool_response = await tool.ainvoke(tool_input, config=config)
         elif response["type"] == "response":
             user_feedback = response["args"]
             tool_response = user_feedback
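When `call_tool_with_interrupt` pauses at the interrupt, execution resumes by re-invoking the graph with a `Command`. A hedged sketch of the resume side, assuming the interrupt was raised as `interrupt([request])[0]` and the graph was compiled with a checkpointer:

```python
from langgraph.types import Command

# Inside an async context. One response per pending interrupt;
# "accept" triggers the tool.ainvoke(...) branch above.
result = await graph.ainvoke(
    Command(resume=[{"type": "accept", "args": None}]),
    config={"configurable": {"thread_id": "thread-1"}},  # illustrative thread id
)
```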
dao_ai/tools/mcp.py CHANGED
@@ -77,6 +77,8 @@ def create_mcp_tools(
             logger.error(f"Failed to list MCP tools: {e}")
             return []
 
+    # Note: This still needs to run sync during tool creation/registration
+    # The actual tool execution will be async
     try:
         mcp_tools: list | ListToolsResult = asyncio.run(_list_mcp_tools())
         if isinstance(mcp_tools, ListToolsResult):
@@ -96,22 +98,19 @@ def create_mcp_tools(
             description=mcp_tool.description or f"MCP tool: {mcp_tool.name}",
             args_schema=mcp_tool.inputSchema,
         )
-        def tool_wrapper(**kwargs):
+        async def tool_wrapper(**kwargs):
             """Execute MCP tool with fresh session and authentication."""
             logger.debug(f"Invoking MCP tool {mcp_tool.name} with fresh session")
 
-            async def _invoke():
-                connection = _create_fresh_connection()
-                client = MultiServerMCPClient({function.name: connection})
+            connection = _create_fresh_connection()
+            client = MultiServerMCPClient({function.name: connection})
 
-                try:
-                    async with client.session(function.name) as session:
-                        return await session.call_tool(mcp_tool.name, kwargs)
-                except Exception as e:
-                    logger.error(f"MCP tool {mcp_tool.name} failed: {e}")
-                    raise
-
-            return asyncio.run(_invoke())
+            try:
+                async with client.session(function.name) as session:
+                    return await session.call_tool(mcp_tool.name, kwargs)
+            except Exception as e:
+                logger.error(f"MCP tool {mcp_tool.name} failed: {e}")
+                raise
 
         return as_human_in_the_loop(tool_wrapper, function)
 
dao_ai/utils.py CHANGED
@@ -48,6 +48,7 @@ def get_installed_packages() -> dict[str, str]:
         f"langchain=={version('langchain')}",
         f"langchain-mcp-adapters=={version('langchain-mcp-adapters')}",
         f"langchain-openai=={version('langchain-openai')}",
+        f"langchain-tavily=={version('langchain-tavily')}",
         f"langgraph=={version('langgraph')}",
         f"langgraph-checkpoint-postgres=={version('langgraph-checkpoint-postgres')}",
         f"langgraph-prebuilt=={version('langgraph-prebuilt')}",
{dao_ai-0.0.15.dist-info → dao_ai-0.0.17.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dao-ai
-Version: 0.0.15
+Version: 0.0.17
 Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
 Project-URL: Homepage, https://github.com/natefleming/dao-ai
 Project-URL: Documentation, https://natefleming.github.io/dao-ai
@@ -30,6 +30,7 @@ Requires-Dist: databricks-sdk[openai]>=0.55.0
 Requires-Dist: duckduckgo-search>=8.0.2
 Requires-Dist: grandalf>=0.8
 Requires-Dist: langchain-mcp-adapters>=0.1.9
+Requires-Dist: langchain-tavily>=0.2.11
 Requires-Dist: langchain>=0.3.27
 Requires-Dist: langgraph-checkpoint-postgres>=2.0.23
 Requires-Dist: langgraph-supervisor>=0.0.29
{dao_ai-0.0.15.dist-info → dao_ai-0.0.17.dist-info}/RECORD RENAMED
@@ -4,15 +4,15 @@ dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
 dao_ai/chat_models.py,sha256=uhwwOTeLyHWqoTTgHrs4n5iSyTwe4EQcLKnh3jRxPWI,8626
 dao_ai/cli.py,sha256=Aez2TQW3Q8Ho1IaIkRggt0NevDxAAVPjXkePC5GPJF0,20429
 dao_ai/config.py,sha256=VyHqkW1UMQJ0fzyme1fV_3fi_6wDmKRQeCrx881fDQ4,45173
-dao_ai/graph.py,sha256=rIm6cLsWwViB3L1dIZp9qc-U-JgFNB5ngEi22Y3iVGQ,7806
+dao_ai/graph.py,sha256=kXaGLGFVekDWqm-AHzti6LmrXnyi99VQ-AdCGuNb_xM,7831
 dao_ai/guardrails.py,sha256=-Qh0f_2Db9t4Nbrrx9FM7tnpqShjMoyxepZ0HByItfU,4027
 dao_ai/messages.py,sha256=tRZQTeb5YFKu8cm1xeaCkKhidq-0tdzncNEzVePvits,6806
-dao_ai/models.py,sha256=wME6hS7w2BTS4SUlhEYuCExMOYStnWk-YWsyatAHYyY,11836
-dao_ai/nodes.py,sha256=nusBk8tBLY4JlkmzObCKwDe5JR11A8XzYcenC-yS43o,8406
+dao_ai/models.py,sha256=Zf5Rqus5xcdpxSvuLlDau4JM1j1fF9v_MnQ7HW4BXU4,13862
+dao_ai/nodes.py,sha256=SSuFNTXOdFaKg_aX-yUkQO7fM9wvNGu14lPXKDapU1U,8461
 dao_ai/prompts.py,sha256=vpmIbWs_szXUgNNDs5Gh2LcxKZti5pHDKSfoClUcgX0,1289
 dao_ai/state.py,sha256=GwbMbd1TWZx1T5iQrEOX6_rpxOitlmyeJ8dMr2o_pag,1031
 dao_ai/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dao_ai/utils.py,sha256=HOSnt9_9kJR8K8d4HfCUmPBfUsM6IYVI2jKxVFeZOvM,4157
+dao_ai/utils.py,sha256=dkZTXNN6q0xwkrvSWdNq8937W2xGuLCRWRb6hRQM6kA,4217
 dao_ai/vector_search.py,sha256=jlaFS_iizJ55wblgzZmswMM3UOL-qOp2BGJc0JqXYSg,2839
 dao_ai/hooks/__init__.py,sha256=LlHGIuiZt6vGW8K5AQo1XJEkBP5vDVtMhq0IdjcLrD4,417
 dao_ai/hooks/core.py,sha256=ZShHctUSoauhBgdf1cecy9-D7J6-sGn-pKjuRMumW5U,6663
@@ -24,17 +24,17 @@ dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 dao_ai/providers/base.py,sha256=-fjKypCOk28h6vioPfMj9YZSw_3Kcbi2nMuAyY7vX9k,1383
 dao_ai/providers/databricks.py,sha256=XRPOqwF5SeA9rPAOWMg2gSMC7lw31BI5VI_4K0KIOqo,27931
 dao_ai/tools/__init__.py,sha256=ye6MHaJY7tUnJ8336YJiLxuZr55zDPNdOw6gm7j5jlc,1103
-dao_ai/tools/agent.py,sha256=_XMz6HtrybpVthhRyStADechF6vXLFyK97i01XTBhtw,1868
+dao_ai/tools/agent.py,sha256=WbQnyziiT12TLMrA7xK0VuOU029tdmUBXbUl-R1VZ0Q,1886
 dao_ai/tools/core.py,sha256=Kei33S8vrmvPOAyrFNekaWmV2jqZ-IPS1QDSvU7RZF0,1984
-dao_ai/tools/genie.py,sha256=yG0Y6FHsYJBdhY7TJoKiUF7XCmCBMsPlbASnZhsUN84,2636
-dao_ai/tools/human_in_the_loop.py,sha256=9HpbFQ9NW6R6eu8eNz2hWPekrB0FFUOJyfa5nUzqqNo,3446
-dao_ai/tools/mcp.py,sha256=rUeodCfGFXSvW-byTW9gsO6QI-p5SZkAP8ZFCFzREY0,4400
+dao_ai/tools/genie.py,sha256=GzV5lfDYKmzW_lSLxAsPaTwnzX6GxQOB1UcLaTDqpfY,2787
+dao_ai/tools/human_in_the_loop.py,sha256=IBmQJmpxkdDxnBNyABc_-dZhhsQlTNTkPyUXgkHKIgY,3466
+dao_ai/tools/mcp.py,sha256=auEt_dwv4J26fr5AgLmwmnAsI894-cyuvkvjItzAUxs,4419
 dao_ai/tools/python.py,sha256=XcQiTMshZyLUTVR5peB3vqsoUoAAy8gol9_pcrhddfI,1831
 dao_ai/tools/time.py,sha256=Y-23qdnNHzwjvnfkWvYsE7PoWS1hfeKy44tA7sCnNac,8759
 dao_ai/tools/unity_catalog.py,sha256=PXfLj2EgyQgaXq4Qq3t25AmTC4KyVCF_-sCtg6enens,1404
 dao_ai/tools/vector_search.py,sha256=EDYQs51zIPaAP0ma1D81wJT77GQ-v-cjb2XrFVWfWdg,2621
-dao_ai-0.0.15.dist-info/METADATA,sha256=6Tw9CZtoiSI_inDnPJouXAMd2nAbhj9xyXqJdmQslQU,41338
-dao_ai-0.0.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-dao_ai-0.0.15.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
-dao_ai-0.0.15.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
-dao_ai-0.0.15.dist-info/RECORD,,
+dao_ai-0.0.17.dist-info/METADATA,sha256=G50pidDVsQt5j4T2NhygeeYJyMOdL7fSI_zyMaWIGUo,41378
+dao_ai-0.0.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dao_ai-0.0.17.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
+dao_ai-0.0.17.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
+dao_ai-0.0.17.dist-info/RECORD,,