agentcrew-ai 0.8.2__py3-none-any.whl → 0.8.3__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- AgentCrew/__init__.py +1 -1
- AgentCrew/modules/agents/local_agent.py +11 -0
- AgentCrew/modules/browser_automation/element_extractor.py +4 -3
- AgentCrew/modules/browser_automation/js/draw_element_boxes.js +200 -0
- AgentCrew/modules/browser_automation/js/extract_clickable_elements.js +57 -23
- AgentCrew/modules/browser_automation/js/extract_elements_by_text.js +21 -19
- AgentCrew/modules/browser_automation/js/extract_input_elements.js +22 -23
- AgentCrew/modules/browser_automation/js/filter_hidden_elements.js +104 -0
- AgentCrew/modules/browser_automation/js/remove_element_boxes.js +29 -0
- AgentCrew/modules/browser_automation/js_loader.py +385 -92
- AgentCrew/modules/browser_automation/service.py +118 -347
- AgentCrew/modules/browser_automation/tool.py +28 -29
- AgentCrew/modules/chat/message/conversation.py +9 -8
- AgentCrew/modules/console/input_handler.py +2 -0
- AgentCrew/modules/console/ui_effects.py +3 -4
- AgentCrew/modules/custom_llm/service.py +25 -3
- AgentCrew/modules/file_editing/tool.py +9 -11
- AgentCrew/modules/google/native_service.py +13 -0
- AgentCrew/modules/llm/constants.py +38 -1
- AgentCrew/modules/llm/model_registry.py +9 -0
- AgentCrew/modules/llm/types.py +12 -1
- AgentCrew/modules/memory/base_service.py +2 -2
- AgentCrew/modules/memory/chroma_service.py +80 -138
- AgentCrew/modules/memory/tool.py +15 -15
- AgentCrew/modules/openai/response_service.py +19 -11
- AgentCrew/modules/openai/service.py +15 -0
- AgentCrew/modules/prompts/constants.py +27 -14
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/METADATA +2 -2
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/RECORD +33 -30
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/WHEEL +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/entry_points.txt +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/licenses/LICENSE +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/top_level.txt +0 -0

AgentCrew/modules/memory/chroma_service.py
CHANGED

@@ -54,7 +54,7 @@ class ChromaMemoryService(BaseMemoryService):
         ## set to groq if key available
         if self.llm_service:
             if self.llm_service.provider_name == "google":
-                self.llm_service.model = "gemini-2.5-flash-lite
+                self.llm_service.model = "gemini-2.5-flash-lite"
             elif self.llm_service.provider_name == "claude":
                 self.llm_service.model = "claude-3-5-haiku-latest"
             elif self.llm_service.provider_name == "openai":
@@ -241,88 +241,66 @@ class ChromaMemoryService(BaseMemoryService):
         session_id = operation_data["session_id"]

         # Use the existing storage logic but make it synchronous
-
-
-        # avaialble_ids = collection.get(
-        #     where={
-        #         "agent": agent_name,
-        #     },
-        #     include=[],
-        # )["ids"]
+        memory_data = None
+        retried = 0
         if self.llm_service:
-
-
-
-
-
-
-
-
+            while retried < 3:
+                try:
+                    # Process with LLM using asyncio.run to handle async call in worker thread
+                    if self.current_conversation_context.get(session_id, ""):
+                        analyzed_prompt = PRE_ANALYZE_WITH_CONTEXT_PROMPT.replace(
+                            "{conversation_context}",
+                            f"""<PREVIOUS_CONVERSATION_CONTEXT>
+{self.current_conversation_context[session_id]}
+</PREVIOUS_CONVERSATION_CONTEXT>""",
+                        )
+                    else:
+                        analyzed_prompt = PRE_ANALYZE_PROMPT
+                    analyzed_prompt = (
+                        analyzed_prompt.replace(
+                            "{current_date}",
+                            datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
+                        )
+                        .replace("{user_message}", user_message)
+                        .replace("{assistant_response}", assistant_response)
                     )
-
-
-                analyzed_prompt = (
-                    analyzed_prompt.replace(
-                        "{current_date}",
-                        datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
+                    analyzed_text = await self.llm_service.process_message(
+                        analyzed_prompt
                     )
-                    .replace("{user_message}", user_message)
-                    .replace("{assistant_response}", assistant_response)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                #     "MEMORY" in memory_data
-                #     and "USER_REQUEST" not in memory_data["MEMORY"]
-                # ):
-                #     memory_data["MEMORY"]["USER_REQUEST"] = user_message
-                # if (
-                #     "MEMORY" in memory_data
-                #     and "ASSISTANT_RESPONSE" not in memory_data["MEMORY"]
-                # ):
-                #     memory_data["MEMORY"]["ASSISTANT_RESPONSE"] = assistant_response
-
-            except Exception as e:
-                logger.warning(f"Error processing conversation with LLM: {e}")
-                # Fallback to simple concatenation if LLM fails
-                memory_data = {
-                    "MEMORY": {
-                        "DATE": datetime.today().strftime("%Y-%m-%d"),
-                        "USER_REQUEST": user_message,
-                        "ASSISTANT_RESPONSE": assistant_response
-                        if len(assistant_response) < 200
-                        else assistant_response[:197] + "...",
-                    }
-                }
-        else:
+                    start_xml = analyzed_text.index("<MEMORY>")
+                    end_xml = analyzed_text.index("</MEMORY>")
+                    xml_content = analyzed_text[
+                        start_xml : end_xml + len("</MEMORY>")
+                    ]
+                    xml_content = (
+                        xml_content.replace("&", "&amp;")
+                        .replace("'", "&apos;")
+                        .replace('"', "&quot;")
+                    )
+                    memory_data = xmltodict.parse(xml_content)
+                    break
+                except Exception as e:
+                    logger.warning(
+                        f"Error processing conversation with LLM: {e} {xml_content}"  # type: ignore
+                    )
+                    retried += 1
+                    continue
+
+        if memory_data is None:
             # Create the memory document by combining user message and response
             memory_data = {
                 "MEMORY": {
                     "DATE": datetime.today().strftime("%Y-%m-%d"),
-                    "USER_REQUEST": user_message,
-                    "ASSISTANT_RESPONSE": assistant_response
-                    if len(assistant_response) < 200
-                    else assistant_response[:197] + "...",
+                    "CONVERSATION_NOTES": {
+                        "NOTE": [user_message, assistant_response]
+                    },
                 }
             }

         # Store in ChromaDB (existing logic)
-        memory_id = str(uuid.uuid4())
         timestamp = datetime.now().timestamp()
+
+        memory_header = memory_data["MEMORY"].get("HEAD", None)
         conversation_document = xmltodict.unparse(
             memory_data, pretty=True, full_document=False
         )
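For context, the new storage path asks the LLM for an XML record and slices out the <MEMORY> block before handing it to xmltodict. A minimal standalone sketch of that extract-and-parse step (the sample reply string is made up):

    import xmltodict

    def parse_memory_block(analyzed_text: str) -> dict:
        # Mirrors the index() calls in the diff: index() raises ValueError when
        # a tag is missing, which the retry loop counts as a failed attempt.
        start = analyzed_text.index("<MEMORY>")
        end = analyzed_text.index("</MEMORY>") + len("</MEMORY>")
        return xmltodict.parse(analyzed_text[start:end])

    reply = "preamble <MEMORY><HEAD>demo chat</HEAD><DATE>2025-01-03</DATE></MEMORY>"
    print(parse_memory_block(reply)["MEMORY"]["HEAD"])  # -> demo chat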
@@ -335,27 +313,19 @@ class ChromaMemoryService(BaseMemoryService):

         metadata = {
             "date": timestamp,
-            "conversation_id": memory_id,
             "session_id": session_id,
             "agent": agent_name,
             "type": "conversation",
         }
-
-
-
-
-
-
-
-
-
-        else:
-            collection.add(
-                documents=[conversation_document],
-                embeddings=conversation_embedding,
-                metadatas=[metadata],
-                ids=[memory_id],
-            )
+        if memory_header:
+            metadata["header"] = memory_header
+
+        collection.upsert(
+            ids=[f"{session_id}_{agent_name}"],
+            documents=[conversation_document],
+            embeddings=conversation_embedding,
+            metadatas=[metadata],
+        )

         logger.debug(f"Stored conversation: {operation_data['operation_id']}")

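The switch from collection.add() with a fresh uuid4 to collection.upsert() with a deterministic id means each session/agent pair now keeps exactly one conversation document that is rewritten on every store, rather than accumulating a record per turn. A minimal sketch of that behavior with an in-memory Chroma client (collection name, id, and embedding values are illustrative):

    import chromadb

    client = chromadb.Client()
    col = client.get_or_create_collection("conversations")

    doc_id = "sess1_default"   # f"{session_id}_{agent_name}" in the diff
    emb = [[0.1, 0.2, 0.3]]    # caller-supplied embedding, as in the diff

    col.upsert(ids=[doc_id], documents=["turn 1 notes"], embeddings=emb)
    col.upsert(ids=[doc_id], documents=["turns 1-2 notes"], embeddings=emb)

    print(col.count())                         # 1 -> still a single record
    print(col.get(ids=[doc_id])["documents"])  # ['turns 1-2 notes']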
@@ -388,7 +358,7 @@ class ChromaMemoryService(BaseMemoryService):
         self.current_conversation_context = {}
         self.context_embedding = []

-    def load_conversation_context(self, session_id: str):
+    def load_conversation_context(self, session_id: str, agent_name: str = "None"):
         collection = self._initialize_collection()
         latest_memory = collection.get(
             where={
@@ -399,6 +369,7 @@ class ChromaMemoryService(BaseMemoryService):
         self.current_conversation_context[session_id] = latest_memory["documents"][
             -1
         ]
+        print(self.current_conversation_context[session_id])

    def generate_user_context(self, user_input: str, agent_name: str = "None") -> str:
        """
@@ -425,7 +396,7 @@ class ChromaMemoryService(BaseMemoryService):
        else:
            return input

-    def
+    def list_memory_headers(
        self,
        from_date: Optional[int] = None,
        to_date: Optional[int] = None,
@@ -450,9 +421,14 @@ class ChromaMemoryService(BaseMemoryService):
            else and_conditions[0]
            if and_conditions
            else None,
-            include=[],
+            include=["metadatas"],
        )
-
+        headers = []
+        if list_memory and list_memory["metadatas"]:
+            for metadata in list_memory["metadatas"]:
+                if metadata.get("header", None):
+                    headers.append(metadata.get("header"))
+        return headers

    def retrieve_memory(
        self,
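The new list_memory_headers reads only metadata (include=["metadatas"]) and collects the "header" values that store_conversation now writes from each memory's HEAD field; roughly (the where filter in the comment is illustrative):

    def headers_from_metadata(list_memory: dict) -> list:
        # Same filtering as the diff: keep only entries whose metadata carries a header.
        if not list_memory or not list_memory.get("metadatas"):
            return []
        return [m["header"] for m in list_memory["metadatas"] if m.get("header")]

    # e.g. headers_from_metadata(
    #     collection.get(where={"type": "conversation"}, include=["metadatas"]))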
@@ -498,36 +474,30 @@ class ChromaMemoryService(BaseMemoryService):
        if not results["documents"] or not results["documents"][0]:
            return "No relevant memories found."

-
-
-
-            zip(results["documents"][0], results["metadatas"][0])  # type:ignore
+        conversation_chunks = []
+        for i, (id, doc, metadata) in enumerate(
+            zip(results["ids"][0], results["documents"][0], results["metadatas"][0])  # type:ignore
        ):
-
-
-
-            "
+            conversation_chunks.append(
+                {
+                    "id": id,
+                    "document": doc,
                    "timestamp": metadata.get("date", None)
                    or metadata.get("timestamp", "unknown"),
                    "relevance": results["distances"][0][i]
                    if results["distances"]
                    else 99,
                }
-            conversation_chunks[conv_id]["chunks"].append(
-                (metadata.get("chunk_index", 0), doc)
            )

        # Sort conversations by relevance
-        sorted_conversations = sorted(
-            conversation_chunks.items(), key=lambda x: x[1]["relevance"]
-        )
+        sorted_conversations = sorted(conversation_chunks, key=lambda x: x["relevance"])

        # Format the output
        output = []
-        for conv_id, conv_data in sorted_conversations:
+        for conv_data in sorted_conversations:
            # Sort chunks by index
-
-            conversation_text = "\n".join([chunk for _, chunk in sorted_chunks])
+            conversation_text = conv_data["document"]
            if conv_data["relevance"] > RELEVANT_THRESHOLD:
                continue
            # Format timestamp
@@ -544,22 +514,10 @@ class ChromaMemoryService(BaseMemoryService):
                timestamp = conv_data["timestamp"]

            output.append(
-                f"--- Memory from {timestamp} ---\n{conversation_text}\n---"
+                f"--- Memory from {timestamp} [id:{conv_data['id']}] ---\n{conversation_text}\n---"
            )

        memories = "\n\n".join(output)
-        # if self.llm_service:
-        #     try:
-        #         return await self.llm_service.process_message(
-        #             POST_RETRIEVE_MEMORY.replace("{keywords}", keywords).replace(
-        #                 "{memory_list}", memories
-        #             )
-        #         )
-        #     except Exception as e:
-        #         logger.warning(f"Error processing retrieved memories with LLM: {e}")
-        #         # Fallback to returning raw memories if LLM processing fails
-        #         return memories
-        #     else:
        return memories

    def _cosine_similarity(self, vec_a, vec_b):
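With chunking gone, ranking in retrieve_memory reduces to sorting flat hit dicts by ascending distance and skipping anything past RELEVANT_THRESHOLD. A toy illustration (threshold value and hits are assumed):

    RELEVANT_THRESHOLD = 0.8  # assumed value; the real constant lives in the module

    hits = [
        {"id": "s2_agent", "document": "older note", "relevance": 0.95},
        {"id": "s1_agent", "document": "close match", "relevance": 0.31},
    ]
    ranked = sorted(hits, key=lambda h: h["relevance"])
    kept = [h for h in ranked if h["relevance"] <= RELEVANT_THRESHOLD]
    print([h["id"] for h in kept])  # ['s1_agent']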
@@ -663,23 +621,8 @@ class ChromaMemoryService(BaseMemoryService):
                    "count": 0,
                }

-            # Collect all conversation IDs related to the topic
-            conversation_ids = set()
-            if results["metadatas"] and results["metadatas"][0]:
-                for metadata in results["metadatas"][0]:
-                    conv_id = metadata.get("conversation_id")
-                    if conv_id:
-                        conversation_ids.add(conv_id)
-
-            # Get all memories to find those with matching conversation IDs
-            all_memories = collection.get()
-
            # Find IDs to remove
-            ids_to_remove = []
-            if all_memories["metadatas"]:
-                for i, metadata in enumerate(all_memories["metadatas"]):
-                    if metadata.get("conversation_id") in conversation_ids:
-                        ids_to_remove.append(all_memories["ids"][i])
+            ids_to_remove = results["ids"][0]

            # Remove the memories
            if ids_to_remove:
@@ -689,7 +632,6 @@ class ChromaMemoryService(BaseMemoryService):
                "success": True,
                "message": f"Successfully removed {len(ids_to_remove)} memory chunks related to '{topic}'",
                "count": len(ids_to_remove),
-                "conversations_affected": len(conversation_ids),
            }

        except Exception as e:
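Because each conversation is now a single document, forgetting a topic no longer needs the conversation_id round trip: the ids returned by the semantic query can be deleted directly. A sketch under that assumption (function name and n_results are illustrative):

    def forget_topic(collection, topic_embedding) -> int:
        # One document per conversation, so query ids are exactly the ids to delete.
        results = collection.query(query_embeddings=[topic_embedding], n_results=10)
        ids_to_remove = results["ids"][0]
        if ids_to_remove:
            collection.delete(ids=ids_to_remove)
        return len(ids_to_remove)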
AgentCrew/modules/memory/tool.py
CHANGED

@@ -62,12 +62,12 @@ def get_memory_forget_tool_handler(memory_service: BaseMemoryService) -> Callabl
        try:
            result = memory_service.forget_ids(ids, agent_name)
            return (
-                f"
+                f"Removed memories: {result.get('message', 'Success')}"
                if result.get("success")
-                else f"
+                else f"Removal incomplete: {result.get('message', 'Not found')}"
            )
        except Exception as e:
-            return f"
+            return f"Memories removal failed: {str(e)}"

    return handle_memory_forget

@@ -162,11 +162,11 @@ def get_memory_retrieve_tool_handler(memory_service: BaseMemoryService) -> Calla
        to_date = params.get("to_date", None)

        if not query:
-            raise ValueError("
+            raise ValueError("Phrases required for memory search. Try again.")

        if len(query) < 3:
            raise ValueError(
-                f"
+                f"Search term '{query}' too short. Try again with more semantica and descriptive phrases."
            )

        # Use provided agent_name or fallback to current agent
@@ -188,13 +188,13 @@ def get_memory_retrieve_tool_handler(memory_service: BaseMemoryService) -> Calla
        )

        if not result or result.strip() == "":
-            return f"
+            return f"No memories found for '{query}'. Try broader phrases or related terms."

        # Count memories for user feedback
-        return f"
+        return f"Found relevant memories:\n\n{result}"

    except Exception as e:
-        return f"
+        return f"Memory search failed: {str(e)}"

    return handle_memory_retrieve

@@ -261,15 +261,15 @@ def get_adapt_tool_handler(persistence_service: Any) -> Callable:
        scope = params.get("scope", "global").strip().lower()

        if not behavior_id:
-            return "
+            return "Behavior ID required (e.g., 'communication_style_technical')."

        if not behavior:
-            return "
+            return "Behavior description required in 'when...do...' format."

        # Validate format
        behavior_lower = behavior.lower()
        if not behavior_lower.startswith("when "):
-            return "
+            return "Use format: 'when [condition], [action]'"

        current_agent = AgentManager.get_instance().get_current_agent()
        agent_name = current_agent.name if current_agent else "default"
@@ -279,14 +279,14 @@ def get_adapt_tool_handler(persistence_service: Any) -> Callable:
                agent_name, behavior_id, behavior, scope == "local"
            )
            return (
-                f"
+                f"Stored behavior '{behavior_id}': {behavior}"
                if success
-                else "
+                else "Storage completed but may need verification."
            )
        except ValueError as e:
-            return f"
+            return f"Invalid format: {str(e)}"
        except Exception as e:
-            return f"
+            return f"Storage failed: {str(e)}"

    return handle_adapt

AgentCrew/modules/openai/response_service.py
CHANGED

@@ -225,8 +225,16 @@ class OpenAIResponseService(BaseLLMService):
            "input": input_data,
            "stream": True,
            "instructions": self.system_prompt or None,
+            "temperature": self.temperature,
        }

+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                stream_params["temperature"] = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                stream_params["top_p"] = forced_sample_params.top_p
+
        # Add reasoning configuration for thinking models
        if "thinking" in ModelRegistry.get_model_capabilities(full_model_id):
            if self.reasoning_effort:
@@ -251,17 +259,17 @@ class OpenAIResponseService(BaseLLMService):

            stream_params["tools"] = all_tools

-
-
-
-
-
-
-
-
-
-
-
+        if (
+            "structured_output" in ModelRegistry.get_model_capabilities(full_model_id)
+            and self.structured_output
+        ):
+            stream_params["text"] = {
+                "format": {
+                    "name": "default",
+                    "type": "json_schema",
+                    "json_schema": self.structured_output,
+                }
+            }

        return await self.client.responses.create(**stream_params)

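For reference, the structured-output branch above simply attaches a text.format payload built from self.structured_output. The snippet below rebuilds the same dict shape with stand-in values (the schema and the capability set are assumptions, and only the shape mirrors the diff):

    # Stand-in values; only the dict shape mirrors the diff above.
    structured_output = {
        "type": "object",
        "properties": {"answer": {"type": "string"}},
        "required": ["answer"],
    }
    capabilities = {"thinking", "structured_output"}  # assumed registry result

    stream_params = {"model": "gpt-4.1", "stream": True}
    if "structured_output" in capabilities and structured_output:
        stream_params["text"] = {
            "format": {
                "name": "default",
                "type": "json_schema",
                "json_schema": structured_output,
            }
        }
    print(stream_params["text"]["format"]["type"])  # json_schema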
AgentCrew/modules/openai/service.py
CHANGED

@@ -196,6 +196,7 @@ class OpenAIService(BaseLLMService):
            "stream_options": {"include_usage": True},
            "max_tokens": 20000,
        }
+
        if "thinking" in ModelRegistry.get_model_capabilities(full_model_id):
            stream_params.pop("max_tokens", None)
            if self.reasoning_effort:
@@ -203,6 +204,20 @@ class OpenAIService(BaseLLMService):
        else:
            stream_params["temperature"] = self.temperature
            stream_params["top_p"] = 0.95
+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                stream_params["temperature"] = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                stream_params["top_p"] = forced_sample_params.top_p
+            if forced_sample_params.frequency_penalty is not None:
+                stream_params["frequency_penalty"] = (
+                    forced_sample_params.frequency_penalty
+                )
+            if forced_sample_params.presence_penalty is not None:
+                stream_params["presence_penalty"] = (
+                    forced_sample_params.presence_penalty
+                )

        # Add system message if provided
        if self.system_prompt:
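Both services apply the same pattern: a registry-pinned sample-params object overrides only the sampling fields it actually sets. The dataclass below is an assumption about what ModelRegistry.get_model_sample_params returns (the real type lives in AgentCrew/modules/llm/types.py per the file list); the override logic matches the diffs:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ModelSampleParams:  # assumed shape of the registry's sample-params type
        temperature: Optional[float] = None
        top_p: Optional[float] = None
        frequency_penalty: Optional[float] = None
        presence_penalty: Optional[float] = None

    def apply_forced_params(stream_params: dict, forced: Optional[ModelSampleParams]) -> dict:
        # Override only the fields the registry pins; user settings survive otherwise.
        if forced:
            for name in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
                value = getattr(forced, name)
                if value is not None:
                    stream_params[name] = value
        return stream_params

    print(apply_forced_params({"temperature": 0.7, "top_p": 0.95},
                              ModelSampleParams(temperature=1.0)))
    # {'temperature': 1.0, 'top_p': 0.95}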
AgentCrew/modules/prompts/constants.py
CHANGED

@@ -1,13 +1,14 @@
 PRE_ANALYZE_PROMPT = """
 <MEMORY_PROCESSING_REQUEST>
-Extract this conversation for AI memory storage. Create a comprehensive xml record that includes all fields in <OUTPUT_FORMAT> below.
+Extract this conversation for AI memory storage. Create a comprehensive xml record must start with <MEMORY> that includes all fields in <OUTPUT_FORMAT> below.
 <OUTPUT_FORMAT>
-1.
+1. HEAD: one short sentence that describe this conversation.
 2. DATE: {current_date}
-3.
-4.
+3. CONTEXT: Background information relevant to understanding this exchange
+4. INSIGHTS: Important insights, lessons learned, or conclusions drawn from the conversation
 5. ENTITIES: Important people, organizations, products, or concepts mentioned including essential facts, concepts, or data points discussed about that entity
 6. DOMAINS: The subject domain(s) this conversation relates to
+7. RESOURCES: Important urls, file paths has been mentioned in conversation.
 8. CONVERSATION_NOTES: The key extracted information from conversation.
 </OUTPUT_FORMAT>

@@ -22,7 +23,7 @@ PRE_ANALYZE_PROMPT = """

 <PROCESSING_INSTRUCTIONS>
 1. Format each section with its heading in ALL CAPS as a tag wrapped around the content.
-2. If a section would be empty, include the heading with
+2. If a section would be empty, include the heading with empty text "" as the content.
 3. Focus on extracting factual information rather than making assumptions.
 4. <CONVERSATION_NOTES> should capture all the key points and the direction of flow of the whole conversation in concise.
 5. No explanations or additional text.
@@ -30,10 +31,12 @@ PRE_ANALYZE_PROMPT = """

 <EXAMPLES>
 <MEMORY>
-<
+<HEAD>discussion about donald trump</HEAD>
 <DATE>2025-01-03</DATE>
-<
-<
+<CONTEXT>discussed with user about details and facts around Donald Trump</CONTEXT>
+<INSIGHTS>
+<INSIGHT>To get accurate information, assistant need to collect from multi sources</INSIGHT>
+</INSIGHTS>
 <ENTITIES>
 <ENTITY>
 <NAME>DONALP TRUMP</NAME>
@@ -43,6 +46,9 @@ PRE_ANALYZE_PROMPT = """
 <DOMAINS>
 <DOMAIN>Politics</DOMAIN>
 </DOMAINS>
+<RESOURCES>
+<RESOURCE>https://en.wikipedia.org/wiki/Donald_Trump</RESOURCE>
+</RESOURCES>
 <CONVERSATION_NOTES>
 <NOTE>User asked about Donald Trump's background. Assistant provided details on his presidency and key events.</NOTE>
 </CONVERSATION_NOTES>
@@ -53,16 +59,17 @@ PRE_ANALYZE_PROMPT = """

 PRE_ANALYZE_WITH_CONTEXT_PROMPT = """
 <MEMORY_PROCESSING_REQUEST>
-Extract this conversation for AI memory storage. Create a comprehensive xml record following INSTRUCTIONS that includes all fields in <OUTPUT_FORMAT> below. No explanations or additional text.
+Extract this conversation for AI memory storage. Create a comprehensive xml record must start with <MEMORY> following INSTRUCTIONS that includes all fields in <OUTPUT_FORMAT> below. No explanations or additional text.

 <INSTRUCTIONS>
-1.
+1. HEAD: update existed HEAD from <PREVIOUS_CONVERSATION_CONTEXT> if available or create one short sentence that describe this conversation.
 2. DATE: {current_date}
-3.
-4.
+3. CONTEXT: Merge the CONTEXT of <PREVIOUS_CONVERSATION_CONTEXT> with new context in CONVERSATION_TURN
+4. INSIGHTS: Add to the INSIGHTS of <PREVIOUS_CONVERSATION_CONTEXT> for new important insights, lessons learned, or conclusions drawn from CONVERSATION_TURN.
 5. ENTITIES: Add to the ENTITIES of <PREVIOUS_CONVERSATION_CONTEXT> for new important people, organizations, products, or concepts mentioned in CONVERSATION_TURN including essential facts, concepts, or data points discussed about that entity
 6. DOMAINS: Add to the DOMAINS of <PREVIOUS_CONVERSATION_CONTEXT> for new subject domain(s) in CONVERSATION_TURN related
-7.
+7. RESOURCES: Add to the RESOURCES of <PREVIOUS_CONVERSATION_CONTEXT> for new important urls, file paths, mentioned in CONVERSATION_TURN
+8. CONVERSATION_NOTES: Add the CONVERSATION_NOTES of <PREVIOUS_CONVERSATION_CONTEXT> for new key notes extracted information from CONVERSATION_TURN. PREVIOUS_CONVERSATION_CONTEXT CONVERSATION_NOTES must be keep intact.
 </INSTRUCTIONS>

 {conversation_context}
@@ -78,10 +85,13 @@ PRE_ANALYZE_WITH_CONTEXT_PROMPT = """

 <OUTPUT_FORMAT>
 <MEMORY>
-<
+<HEAD>[head]</HEAD>
 <DATE>[current_date]</DATE>
 <SUMMARY>[merged_summary]</SUMMARY>
 <CONTEXT>[merged_context]</CONTEXT>
+<INSIGHTS>
+<INSIGHT>[added_insight]</INSIGHT>
+</INSIGHTS>
 <ENTITIES>
 <ENTITY>
 <NAME>[added_entity_name]</NAME>
@@ -91,6 +101,9 @@ PRE_ANALYZE_WITH_CONTEXT_PROMPT = """
 <DOMAINS>
 <DOMAIN>[added_domain]</DOMAIN>
 </DOMAINS>
+<RESOURCES>
+</RESOURCE>[added_resource]</RESOURCE>
+</RESOURCES>
 <CONVERSATION_NOTES>
 <NOTE>[added_notes]</NOTE>
 </CONVERSATION_NOTES>
{agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agentcrew-ai
-Version: 0.8.2
+Version: 0.8.3
 Summary: Multi-Agents Interactive Chat Tool
 Author-email: Quy Truong <quy.truong@saigontechnology.com>
 License-Expression: Apache-2.0
@@ -47,7 +47,7 @@ Requires-Dist: sounddevice>=0.5.2
 Requires-Dist: soundfile>=0.13.1
 Requires-Dist: jsonref>=1.1.0
 Requires-Dist: pychromedevtools>=0.3.3
-Requires-Dist: html-to-markdown
+Requires-Dist: html-to-markdown<2,>=1.14.0
 Requires-Dist: pip-system-certs>=5.2
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: jsonschema>=4.25.1