agentcrew-ai 0.8.2__py3-none-any.whl → 0.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- AgentCrew/__init__.py +1 -1
- AgentCrew/main.py +3 -1
- AgentCrew/modules/a2a/agent_cards.py +8 -2
- AgentCrew/modules/a2a/errors.py +72 -0
- AgentCrew/modules/a2a/server.py +21 -2
- AgentCrew/modules/a2a/task_manager.py +180 -39
- AgentCrew/modules/agents/local_agent.py +11 -0
- AgentCrew/modules/browser_automation/element_extractor.py +4 -3
- AgentCrew/modules/browser_automation/js/draw_element_boxes.js +200 -0
- AgentCrew/modules/browser_automation/js/extract_clickable_elements.js +58 -26
- AgentCrew/modules/browser_automation/js/extract_elements_by_text.js +21 -19
- AgentCrew/modules/browser_automation/js/extract_input_elements.js +22 -23
- AgentCrew/modules/browser_automation/js/filter_hidden_elements.js +104 -0
- AgentCrew/modules/browser_automation/js/remove_element_boxes.js +29 -0
- AgentCrew/modules/browser_automation/js_loader.py +385 -92
- AgentCrew/modules/browser_automation/service.py +118 -347
- AgentCrew/modules/browser_automation/tool.py +28 -29
- AgentCrew/modules/chat/message/command_processor.py +7 -1
- AgentCrew/modules/chat/message/conversation.py +9 -8
- AgentCrew/modules/code_analysis/service.py +39 -0
- AgentCrew/modules/code_analysis/tool.py +10 -1
- AgentCrew/modules/console/command_handlers.py +186 -1
- AgentCrew/modules/console/completers.py +67 -0
- AgentCrew/modules/console/console_ui.py +59 -5
- AgentCrew/modules/console/display_handlers.py +12 -0
- AgentCrew/modules/console/input_handler.py +2 -0
- AgentCrew/modules/console/ui_effects.py +3 -4
- AgentCrew/modules/custom_llm/service.py +25 -3
- AgentCrew/modules/file_editing/tool.py +9 -11
- AgentCrew/modules/google/native_service.py +13 -0
- AgentCrew/modules/gui/widgets/message_bubble.py +1 -6
- AgentCrew/modules/llm/constants.py +38 -1
- AgentCrew/modules/llm/model_registry.py +9 -0
- AgentCrew/modules/llm/types.py +12 -1
- AgentCrew/modules/memory/base_service.py +2 -2
- AgentCrew/modules/memory/chroma_service.py +79 -138
- AgentCrew/modules/memory/context_persistent.py +10 -4
- AgentCrew/modules/memory/tool.py +17 -18
- AgentCrew/modules/openai/response_service.py +19 -11
- AgentCrew/modules/openai/service.py +15 -0
- AgentCrew/modules/prompts/constants.py +27 -14
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/METADATA +3 -3
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/RECORD +47 -43
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/WHEEL +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/entry_points.txt +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/licenses/LICENSE +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/top_level.txt +0 -0
AgentCrew/modules/memory/chroma_service.py
CHANGED

```diff
@@ -54,7 +54,7 @@ class ChromaMemoryService(BaseMemoryService):
         ## set to groq if key available
         if self.llm_service:
             if self.llm_service.provider_name == "google":
-                self.llm_service.model = "gemini-2.5-flash-lite
+                self.llm_service.model = "gemini-2.5-flash-lite"
             elif self.llm_service.provider_name == "claude":
                 self.llm_service.model = "claude-3-5-haiku-latest"
             elif self.llm_service.provider_name == "openai":
```
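The first hunk pins the memory summarizer to a small model per provider whenever an `llm_service` is attached (the truncated left-hand string is how the rendered diff shows the old value). A minimal sketch of that selection pattern; the helper itself is illustrative, only the provider names and models come from the hunk:

```python
# Illustrative helper mirroring the provider -> summarizer-model mapping above.
SUMMARIZER_MODELS = {
    "google": "gemini-2.5-flash-lite",
    "claude": "claude-3-5-haiku-latest",
}

def pick_summarizer_model(provider_name: str, default_model: str) -> str:
    # Unknown providers keep whatever model the service already uses.
    return SUMMARIZER_MODELS.get(provider_name, default_model)
```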
```diff
@@ -241,88 +241,66 @@ class ChromaMemoryService(BaseMemoryService):
         session_id = operation_data["session_id"]

         # Use the existing storage logic but make it synchronous
-
-
-        # avaialble_ids = collection.get(
-        #     where={
-        #         "agent": agent_name,
-        #     },
-        #     include=[],
-        # )["ids"]
+        memory_data = None
+        retried = 0
         if self.llm_service:
-
-
-
-
-
-
-
-
+            while retried < 3:
+                try:
+                    # Process with LLM using asyncio.run to handle async call in worker thread
+                    if self.current_conversation_context.get(session_id, ""):
+                        analyzed_prompt = PRE_ANALYZE_WITH_CONTEXT_PROMPT.replace(
+                            "{conversation_context}",
+                            f"""<PREVIOUS_CONVERSATION_CONTEXT>
+{self.current_conversation_context[session_id]}
+</PREVIOUS_CONVERSATION_CONTEXT>""",
+                        )
+                    else:
+                        analyzed_prompt = PRE_ANALYZE_PROMPT
+                    analyzed_prompt = (
+                        analyzed_prompt.replace(
+                            "{current_date}",
+                            datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
+                        )
+                        .replace("{user_message}", user_message)
+                        .replace("{assistant_response}", assistant_response)
                     )
-
-
-            analyzed_prompt = (
-                analyzed_prompt.replace(
-                    "{current_date}",
-                    datetime.today().strftime("%Y-%m-%d %H:%M:%S"),
+                    analyzed_text = await self.llm_service.process_message(
+                        analyzed_prompt
                     )
-                .
-                .
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            #     "MEMORY" in memory_data
-            #     and "USER_REQUEST" not in memory_data["MEMORY"]
-            # ):
-            #     memory_data["MEMORY"]["USER_REQUEST"] = user_message
-            # if (
-            #     "MEMORY" in memory_data
-            #     and "ASSISTANT_RESPONSE" not in memory_data["MEMORY"]
-            # ):
-            #     memory_data["MEMORY"]["ASSISTANT_RESPONSE"] = assistant_response
-
-            except Exception as e:
-                logger.warning(f"Error processing conversation with LLM: {e}")
-                # Fallback to simple concatenation if LLM fails
-                memory_data = {
-                    "MEMORY": {
-                        "DATE": datetime.today().strftime("%Y-%m-%d"),
-                        "USER_REQUEST": user_message,
-                        "ASSISTANT_RESPONSE": assistant_response
-                        if len(assistant_response) < 200
-                        else assistant_response[:197] + "...",
-                    }
-                }
-        else:
+                    start_xml = analyzed_text.index("<MEMORY>")
+                    end_xml = analyzed_text.index("</MEMORY>")
+                    xml_content = analyzed_text[
+                        start_xml : end_xml + len("</MEMORY>")
+                    ]
+                    xml_content = (
+                        xml_content.replace("&", "&amp;")
+                        .replace("'", "&apos;")
+                        .replace('"', "&quot;")
+                    )
+                    memory_data = xmltodict.parse(xml_content)
+                    break
+                except Exception as e:
+                    logger.warning(
+                        f"Error processing conversation with LLM: {e} {xml_content}"  # type: ignore
+                    )
+                    retried += 1
+                    continue
+
+        if memory_data is None:
             # Create the memory document by combining user message and response
             memory_data = {
                 "MEMORY": {
                     "DATE": datetime.today().strftime("%Y-%m-%d"),
-                    "
-
-
-                    else assistant_response[:197] + "...",
+                    "CONVERSATION_NOTES": {
+                        "NOTE": [user_message, assistant_response]
+                    },
                 }
             }

         # Store in ChromaDB (existing logic)
-        memory_id = str(uuid.uuid4())
         timestamp = datetime.now().timestamp()
+
+        memory_header = memory_data["MEMORY"].get("HEAD", None)
         conversation_document = xmltodict.unparse(
             memory_data, pretty=True, full_document=False
         )
```
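The rewritten storage path drops the commented-out lookup and the old single-shot fallback in favor of a bounded retry loop: build the analysis prompt, ask the LLM, slice the `<MEMORY>...</MEMORY>` span out of the reply, escape entities, parse with `xmltodict`, and retry up to three times before falling back to a plain notes document. A standalone sketch of the extract-and-parse step; the escaping direction (`&` to `&amp;`, and so on) is an assumption, since the rendered diff collapses both replace arguments to the same character:

```python
import xmltodict

def parse_memory_block(analyzed_text: str) -> dict:
    """Slice <MEMORY>...</MEMORY> out of an LLM reply and parse it into a dict.

    str.index raises ValueError when the block is missing, and xmltodict
    raises ExpatError on malformed XML; the caller treats either as "retry".
    """
    start = analyzed_text.index("<MEMORY>")
    end = analyzed_text.index("</MEMORY>") + len("</MEMORY>")
    # Naive escape as in the hunk above: protects bare ampersands, but would
    # double-escape any entity the model already emitted.
    xml_content = analyzed_text[start:end].replace("&", "&amp;")
    return xmltodict.parse(xml_content)

llm_reply = "preamble <MEMORY><HEAD>demo</HEAD></MEMORY> trailing text"
memory_data = None
for _ in range(3):  # mirrors `while retried < 3`
    try:
        memory_data = parse_memory_block(llm_reply)
        break
    except Exception:
        continue
print(memory_data)  # parsed mapping with MEMORY/HEAD keys
```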
```diff
@@ -335,27 +313,19 @@ class ChromaMemoryService(BaseMemoryService):

         metadata = {
             "date": timestamp,
-            "conversation_id": memory_id,
             "session_id": session_id,
             "agent": agent_name,
             "type": "conversation",
         }
-
-
-
-
-
-
-
-
-
-        else:
-            collection.add(
-                documents=[conversation_document],
-                embeddings=conversation_embedding,
-                metadatas=[metadata],
-                ids=[memory_id],
-            )
+        if memory_header:
+            metadata["header"] = memory_header
+
+        collection.upsert(
+            ids=[f"{session_id}_{agent_name}"],
+            documents=[conversation_document],
+            embeddings=conversation_embedding,
+            metadatas=[metadata],
+        )

         logger.debug(f"Stored conversation: {operation_data['operation_id']}")

```
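Persistence then switches from `collection.add` with a fresh `uuid4` per turn to `collection.upsert` keyed on `f"{session_id}_{agent_name}"`, so each session/agent pair owns a single, continuously rewritten memory document, with the extracted `HEAD` surfaced as optional `header` metadata. A minimal sketch of the upsert semantics against an in-memory Chroma collection (toy data, stand-in embeddings):

```python
import chromadb

client = chromadb.Client()  # ephemeral, in-memory client
collection = client.get_or_create_collection("conversation_memory")

doc_id = "session-1_default-agent"  # stable id: f"{session_id}_{agent_name}"
for revision in ("<MEMORY>turn 1</MEMORY>", "<MEMORY>turns 1-2</MEMORY>"):
    collection.upsert(
        ids=[doc_id],
        documents=[revision],
        embeddings=[[0.1, 0.2, 0.3]],  # stand-in embedding for the sketch
        metadatas=[{"type": "conversation", "header": "demo session"}],
    )

assert collection.count() == 1  # the second upsert overwrote the first
```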
```diff
@@ -388,7 +358,7 @@ class ChromaMemoryService(BaseMemoryService):
         self.current_conversation_context = {}
         self.context_embedding = []

-    def load_conversation_context(self, session_id: str):
+    def load_conversation_context(self, session_id: str, agent_name: str = "None"):
         collection = self._initialize_collection()
         latest_memory = collection.get(
             where={
```
```diff
@@ -425,7 +395,7 @@ class ChromaMemoryService(BaseMemoryService):
         else:
             return input

-    def 
+    def list_memory_headers(
         self,
         from_date: Optional[int] = None,
         to_date: Optional[int] = None,
```
```diff
@@ -450,9 +420,14 @@ class ChromaMemoryService(BaseMemoryService):
             else and_conditions[0]
             if and_conditions
             else None,
-            include=[],
+            include=["metadatas"],
         )
-
+        headers = []
+        if list_memory and list_memory["metadatas"]:
+            for metadata in list_memory["metadatas"]:
+                if metadata.get("header", None):
+                    headers.append(metadata.get("header"))
+        return headers

     def retrieve_memory(
         self,
```
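Because headers live in metadata, the new `list_memory_headers` can enumerate stored conversations from `collection.get(..., include=["metadatas"])` alone, without loading documents or embeddings. A hypothetical call, reading the `Optional[int]` date bounds as epoch seconds:

```python
from datetime import datetime

# Hypothetical usage of the method added above; memory_service is a
# configured ChromaMemoryService instance.
headers = memory_service.list_memory_headers(
    from_date=int(datetime(2025, 1, 1).timestamp()),
    to_date=None,
)
for head in headers:
    print("-", head)  # e.g. "- discussion about donald trump"
```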
```diff
@@ -498,36 +473,30 @@ class ChromaMemoryService(BaseMemoryService):
         if not results["documents"] or not results["documents"][0]:
             return "No relevant memories found."

-
-
-
-            zip(results["documents"][0], results["metadatas"][0])  # type:ignore
+        conversation_chunks = []
+        for i, (id, doc, metadata) in enumerate(
+            zip(results["ids"][0], results["documents"][0], results["metadatas"][0])  # type:ignore
         ):
-
-
-
-            "
+            conversation_chunks.append(
+                {
+                    "id": id,
+                    "document": doc,
                     "timestamp": metadata.get("date", None)
                     or metadata.get("timestamp", "unknown"),
                     "relevance": results["distances"][0][i]
                     if results["distances"]
                     else 99,
                 }
-            conversation_chunks[conv_id]["chunks"].append(
-                (metadata.get("chunk_index", 0), doc)
             )

         # Sort conversations by relevance
-        sorted_conversations = sorted(
-            conversation_chunks.items(), key=lambda x: x[1]["relevance"]
-        )
+        sorted_conversations = sorted(conversation_chunks, key=lambda x: x["relevance"])

         # Format the output
         output = []
-        for 
+        for conv_data in sorted_conversations:
             # Sort chunks by index
-
-            conversation_text = "\n".join([chunk for _, chunk in sorted_chunks])
+            conversation_text = conv_data["document"]
             if conv_data["relevance"] > RELEVANT_THRESHOLD:
                 continue
             # Format timestamp
```
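With one document per conversation there is nothing to regroup at query time, so `retrieve_memory` now flattens the hits into dicts and sorts them by distance (smaller is more relevant; anything above `RELEVANT_THRESHOLD` is skipped). A sketch of that shaping step over a Chroma-style query result; the threshold value is assumed, the diff only shows the name:

```python
RELEVANT_THRESHOLD = 0.8  # assumed value for illustration

def shape_hits(results: dict) -> list[dict]:
    """Flatten a Chroma query result into per-document dicts, best match first."""
    hits = [
        {"id": id_, "document": doc, "relevance": dist}
        for id_, doc, dist in zip(
            results["ids"][0], results["documents"][0], results["distances"][0]
        )
    ]
    hits.sort(key=lambda h: h["relevance"])  # distances: ascending = most relevant
    return [h for h in hits if h["relevance"] <= RELEVANT_THRESHOLD]

sample = {
    "ids": [["s1_agent", "s2_agent"]],
    "documents": [["<MEMORY>a</MEMORY>", "<MEMORY>b</MEMORY>"]],
    "distances": [[0.42, 1.3]],
}
print(shape_hits(sample))  # only the 0.42 hit survives the threshold
```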
```diff
@@ -544,22 +513,10 @@ class ChromaMemoryService(BaseMemoryService):
             timestamp = conv_data["timestamp"]

             output.append(
-                f"--- Memory from {timestamp}
+                f"--- Memory from {timestamp} [id:{conv_data['id']}] ---\n{conversation_text}\n---"
             )

         memories = "\n\n".join(output)
-        # if self.llm_service:
-        #     try:
-        #         return await self.llm_service.process_message(
-        #             POST_RETRIEVE_MEMORY.replace("{keywords}", keywords).replace(
-        #                 "{memory_list}", memories
-        #             )
-        #         )
-        #     except Exception as e:
-        #         logger.warning(f"Error processing retrieved memories with LLM: {e}")
-        #         # Fallback to returning raw memories if LLM processing fails
-        #         return memories
-        # else:
         return memories

     def _cosine_similarity(self, vec_a, vec_b):
```
```diff
@@ -663,23 +620,8 @@ class ChromaMemoryService(BaseMemoryService):
                     "count": 0,
                 }

-            # Collect all conversation IDs related to the topic
-            conversation_ids = set()
-            if results["metadatas"] and results["metadatas"][0]:
-                for metadata in results["metadatas"][0]:
-                    conv_id = metadata.get("conversation_id")
-                    if conv_id:
-                        conversation_ids.add(conv_id)
-
-            # Get all memories to find those with matching conversation IDs
-            all_memories = collection.get()
-
             # Find IDs to remove
-            ids_to_remove = []
-            if all_memories["metadatas"]:
-                for i, metadata in enumerate(all_memories["metadatas"]):
-                    if metadata.get("conversation_id") in conversation_ids:
-                        ids_to_remove.append(all_memories["ids"][i])
+            ids_to_remove = results["ids"][0]

             # Remove the memories
             if ids_to_remove:

@@ -689,7 +631,6 @@ class ChromaMemoryService(BaseMemoryService):
                 "success": True,
                 "message": f"Successfully removed {len(ids_to_remove)} memory chunks related to '{topic}'",
                 "count": len(ids_to_remove),
-                "conversations_affected": len(conversation_ids),
             }

         except Exception as e:
```
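Topic forgetting gets the matching simplification: the ids that come back from the similarity query are deleted directly, instead of being re-resolved through per-chunk `conversation_id`s and a full-collection scan. A sketch over the standard Chroma API:

```python
def forget_topic(collection, topic_embedding: list[float], n_results: int = 10) -> int:
    """Delete the memories most similar to a topic; returns how many were removed."""
    results = collection.query(query_embeddings=[topic_embedding], n_results=n_results)
    ids_to_remove = results["ids"][0]
    if ids_to_remove:
        collection.delete(ids=ids_to_remove)
    return len(ids_to_remove)
```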
AgentCrew/modules/memory/context_persistent.py
CHANGED

```diff
@@ -513,7 +513,9 @@ class ContextPersistenceService:
             logger.error(f"ERROR: Failed to store adaptive behavior: {e}")
             return False

-    def remove_adaptive_behavior(
+    def remove_adaptive_behavior(
+        self, agent_name: str, behavior_id: str, is_local: bool = False
+    ) -> bool:
         """
         Removes a specific adaptive behavior for an agent.

@@ -524,9 +526,13 @@ class ContextPersistenceService:
         Returns:
             True if successful or behavior didn't exist, False on error.
         """
-
-
+
+        adaptive_file_path = (
+            self.adaptive_behaviors_local_path
+            if is_local
+            else self.adaptive_behaviors_file_path
         )
+        adaptive_data = self._read_json_file(adaptive_file_path, default_value={})

         if not isinstance(adaptive_data, dict):
             logger.warning("WARNING: Adaptive behaviors file was not a dictionary.")

@@ -540,7 +546,7 @@ class ContextPersistenceService:
             del adaptive_data[agent_name]

         try:
-            self._write_json_file(
+            self._write_json_file(adaptive_file_path, adaptive_data)
             logger.info(
                 f"INFO: Removed adaptive behavior '{behavior_id}' for agent '{agent_name}'"
             )
```
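The new `is_local` flag threads the project-local versus global split through removal: the behaviors file is resolved once up front, and the same path is reused for the write-back. A hypothetical call:

```python
# Hypothetical usage; persistence_service is a ContextPersistenceService.
ok = persistence_service.remove_adaptive_behavior(
    agent_name="default",
    behavior_id="communication_style_technical",
    is_local=True,  # target the project-local behaviors file
)
```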
AgentCrew/modules/memory/tool.py
CHANGED
```diff
@@ -62,12 +62,12 @@ def get_memory_forget_tool_handler(memory_service: BaseMemoryService) -> Callable
         try:
             result = memory_service.forget_ids(ids, agent_name)
             return (
-                f"
+                f"Removed memories: {result.get('message', 'Success')}"
                 if result.get("success")
-                else f"
+                else f"Removal incomplete: {result.get('message', 'Not found')}"
             )
         except Exception as e:
-            return f"
+            return f"Memories removal failed: {str(e)}"

     return handle_memory_forget

@@ -162,11 +162,11 @@ def get_memory_retrieve_tool_handler(memory_service: BaseMemoryService) -> Callable
         to_date = params.get("to_date", None)

         if not query:
-            raise ValueError("
+            raise ValueError("Phrases required for memory search. Try again.")

         if len(query) < 3:
             raise ValueError(
-                f"
+                f"Search term '{query}' too short. Try again with more semantica and descriptive phrases."
             )

         # Use provided agent_name or fallback to current agent

@@ -188,13 +188,13 @@ def get_memory_retrieve_tool_handler(memory_service: BaseMemoryService) -> Callable
             )

             if not result or result.strip() == "":
-                return f"
+                return f"No memories found for '{query}'. Try broader phrases or related terms."

             # Count memories for user feedback
-            return f"
+            return f"Found relevant memories:\n\n{result}"

         except Exception as e:
-            return f"
+            return f"Memory search failed: {str(e)}"

     return handle_memory_retrieve

@@ -220,8 +220,7 @@ All behaviors must follow 'when..., [action]...' format for automatic activation
         "scope": {
             "type": "string",
             "enum": ["global", "project"],
-            "
-            "description": "Scope of the behavior. 'global' for all interactions, 'project' for current project only. Default is 'global'. Optional.",
+            "description": "Scope of the behavior. 'global' apply for all conversations, 'project' applys for current project only. Use project scope when behavior is project-specific, use global if behavior is general.",
         },
     }

@@ -261,32 +260,32 @@ def get_adapt_tool_handler(persistence_service: Any) -> Callable:
         scope = params.get("scope", "global").strip().lower()

         if not behavior_id:
-            return "
+            return "Behavior ID required (e.g., 'communication_style_technical')."

         if not behavior:
-            return "
+            return "Behavior description required in 'when...do...' format."

         # Validate format
         behavior_lower = behavior.lower()
         if not behavior_lower.startswith("when "):
-            return "
+            return "Use format: 'when [condition], [action]'"

         current_agent = AgentManager.get_instance().get_current_agent()
         agent_name = current_agent.name if current_agent else "default"

         try:
             success = persistence_service.store_adaptive_behavior(
-                agent_name, behavior_id, behavior, scope == "
+                agent_name, behavior_id, behavior, scope == "project"
             )
             return (
-                f"
+                f"Stored behavior '{behavior_id}': {behavior}"
                 if success
-                else "
+                else "Storage completed but may need verification."
             )
         except ValueError as e:
-            return f"
+            return f"Invalid format: {str(e)}"
         except Exception as e:
-            return f"
+            return f"Storage failed: {str(e)}"

     return handle_adapt

```
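Most left-hand strings in these hunks were truncated by the diff renderer, so only the new user-facing messages are fully visible. End to end, the adapt tool now validates the id, enforces the 'when [condition], [action]' format, and maps `scope == "project"` onto the persistence layer's local flag; a hypothetical round trip, with parameter keys assumed rather than taken from the diff:

```python
handler = get_adapt_tool_handler(persistence_service)  # factory from this file
reply = handler(
    {
        "id": "communication_style_technical",  # key names assumed
        "behavior": "when user asks for code, include type hints",
        "scope": "project",
    }
)
print(reply)  # e.g. Stored behavior 'communication_style_technical': when user ...
```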
AgentCrew/modules/openai/response_service.py
CHANGED

```diff
@@ -225,8 +225,16 @@ class OpenAIResponseService(BaseLLMService):
             "input": input_data,
             "stream": True,
             "instructions": self.system_prompt or None,
+            "temperature": self.temperature,
         }

+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                stream_params["temperature"] = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                stream_params["top_p"] = forced_sample_params.top_p
+
         # Add reasoning configuration for thinking models
         if "thinking" in ModelRegistry.get_model_capabilities(full_model_id):
             if self.reasoning_effort:

@@ -251,17 +259,17 @@ class OpenAIResponseService(BaseLLMService):

             stream_params["tools"] = all_tools

-
-
-
-
-
-
-
-
-
-
-
+        if (
+            "structured_output" in ModelRegistry.get_model_capabilities(full_model_id)
+            and self.structured_output
+        ):
+            stream_params["text"] = {
+                "format": {
+                    "name": "default",
+                    "type": "json_schema",
+                    "json_schema": self.structured_output,
+                }
+            }

         return await self.client.responses.create(**stream_params)

```
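Structured output rides on the Responses API's `text` parameter: when the model's capability list includes `structured_output` and the service holds a schema, it is forwarded verbatim under `format.json_schema`. A sketch of building that parameter; the schema contents are illustrative:

```python
def build_text_param(schema: dict) -> dict:
    # Mirrors the mapping in the hunk: fixed name/type, caller's schema verbatim.
    return {"format": {"name": "default", "type": "json_schema", "json_schema": schema}}

answer_schema = {
    "type": "object",
    "properties": {"answer": {"type": "string"}, "confidence": {"type": "number"}},
    "required": ["answer", "confidence"],
    "additionalProperties": False,
}
stream_params = {"stream": True}  # abbreviated; see the hunk for the full dict
stream_params["text"] = build_text_param(answer_schema)
```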
AgentCrew/modules/openai/service.py
CHANGED

```diff
@@ -196,6 +196,7 @@ class OpenAIService(BaseLLMService):
             "stream_options": {"include_usage": True},
             "max_tokens": 20000,
         }
+
         if "thinking" in ModelRegistry.get_model_capabilities(full_model_id):
             stream_params.pop("max_tokens", None)
             if self.reasoning_effort:

@@ -203,6 +204,20 @@ class OpenAIService(BaseLLMService):
         else:
             stream_params["temperature"] = self.temperature
             stream_params["top_p"] = 0.95
+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                stream_params["temperature"] = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                stream_params["top_p"] = forced_sample_params.top_p
+            if forced_sample_params.frequency_penalty is not None:
+                stream_params["frequency_penalty"] = (
+                    forced_sample_params.frequency_penalty
+                )
+            if forced_sample_params.presence_penalty is not None:
+                stream_params["presence_penalty"] = (
+                    forced_sample_params.presence_penalty
+                )

         # Add system message if provided
         if self.system_prompt:
```
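Both OpenAI services now consult `ModelRegistry.get_model_sample_params`, letting the registry force sampling settings for models that reject or ignore user-supplied values (the `llm/types.py` and `model_registry.py` entries in the file list presumably add the type and accessor). The override reduces to a guarded dict update, sketched here with a stand-in dataclass whose fields are inferred from the attributes the diff reads:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class SampleParams:  # stand-in for the type added in AgentCrew/modules/llm/types.py
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    frequency_penalty: Optional[float] = None
    presence_penalty: Optional[float] = None

def apply_forced_params(stream_params: dict, forced: Optional[SampleParams]) -> dict:
    """Overwrite user-level sampling settings with registry-forced ones."""
    if forced is not None:
        for name in ("temperature", "top_p", "frequency_penalty", "presence_penalty"):
            value = getattr(forced, name)
            if value is not None:
                stream_params[name] = value
    return stream_params

params = apply_forced_params({"temperature": 0.7}, SampleParams(temperature=1.0))
assert params["temperature"] == 1.0
```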
AgentCrew/modules/prompts/constants.py
CHANGED

```diff
@@ -1,13 +1,14 @@
 PRE_ANALYZE_PROMPT = """
 <MEMORY_PROCESSING_REQUEST>
-Extract this conversation for AI memory storage. Create a comprehensive xml record that includes all fields in <OUTPUT_FORMAT> below.
+Extract this conversation for AI memory storage. Create a comprehensive xml record must start with <MEMORY> that includes all fields in <OUTPUT_FORMAT> below.
 <OUTPUT_FORMAT>
-1.
+1. HEAD: one short sentence that describe this conversation.
 2. DATE: {current_date}
-3.
-4.
+3. CONTEXT: Background information relevant to understanding this exchange
+4. INSIGHTS: Important insights, lessons learned, or conclusions drawn from the conversation
 5. ENTITIES: Important people, organizations, products, or concepts mentioned including essential facts, concepts, or data points discussed about that entity
 6. DOMAINS: The subject domain(s) this conversation relates to
+7. RESOURCES: Important urls, file paths has been mentioned in conversation.
 8. CONVERSATION_NOTES: The key extracted information from conversation.
 </OUTPUT_FORMAT>

@@ -22,7 +23,7 @@ PRE_ANALYZE_PROMPT = """

 <PROCESSING_INSTRUCTIONS>
 1. Format each section with its heading in ALL CAPS as a tag wrapped around the content.
-2. If a section would be empty, include the heading with
+2. If a section would be empty, include the heading with empty text "" as the content.
 3. Focus on extracting factual information rather than making assumptions.
 4. <CONVERSATION_NOTES> should capture all the key points and the direction of flow of the whole conversation in concise.
 5. No explanations or additional text.

@@ -30,10 +31,12 @@ PRE_ANALYZE_PROMPT = """

 <EXAMPLES>
 <MEMORY>
-<
+<HEAD>discussion about donald trump</HEAD>
 <DATE>2025-01-03</DATE>
-<
-<
+<CONTEXT>discussed with user about details and facts around Donald Trump</CONTEXT>
+<INSIGHTS>
+<INSIGHT>To get accurate information, assistant need to collect from multi sources</INSIGHT>
+</INSIGHTS>
 <ENTITIES>
 <ENTITY>
 <NAME>DONALP TRUMP</NAME>

@@ -43,6 +46,9 @@ PRE_ANALYZE_PROMPT = """
 <DOMAINS>
 <DOMAIN>Politics</DOMAIN>
 </DOMAINS>
+<RESOURCES>
+<RESOURCE>https://en.wikipedia.org/wiki/Donald_Trump</RESOURCE>
+</RESOURCES>
 <CONVERSATION_NOTES>
 <NOTE>User asked about Donald Trump's background. Assistant provided details on his presidency and key events.</NOTE>
 </CONVERSATION_NOTES>

@@ -53,16 +59,17 @@ PRE_ANALYZE_PROMPT = """

 PRE_ANALYZE_WITH_CONTEXT_PROMPT = """
 <MEMORY_PROCESSING_REQUEST>
-Extract this conversation for AI memory storage. Create a comprehensive xml record following INSTRUCTIONS that includes all fields in <OUTPUT_FORMAT> below. No explanations or additional text.
+Extract this conversation for AI memory storage. Create a comprehensive xml record must start with <MEMORY> following INSTRUCTIONS that includes all fields in <OUTPUT_FORMAT> below. No explanations or additional text.

 <INSTRUCTIONS>
-1.
+1. HEAD: update existed HEAD from <PREVIOUS_CONVERSATION_CONTEXT> if available or create one short sentence that describe this conversation.
 2. DATE: {current_date}
-3.
-4.
+3. CONTEXT: Merge the CONTEXT of <PREVIOUS_CONVERSATION_CONTEXT> with new context in CONVERSATION_TURN
+4. INSIGHTS: Add to the INSIGHTS of <PREVIOUS_CONVERSATION_CONTEXT> for new important insights, lessons learned, or conclusions drawn from CONVERSATION_TURN.
 5. ENTITIES: Add to the ENTITIES of <PREVIOUS_CONVERSATION_CONTEXT> for new important people, organizations, products, or concepts mentioned in CONVERSATION_TURN including essential facts, concepts, or data points discussed about that entity
 6. DOMAINS: Add to the DOMAINS of <PREVIOUS_CONVERSATION_CONTEXT> for new subject domain(s) in CONVERSATION_TURN related
-7.
+7. RESOURCES: Add to the RESOURCES of <PREVIOUS_CONVERSATION_CONTEXT> for new important urls, file paths, mentioned in CONVERSATION_TURN
+8. CONVERSATION_NOTES: Add the CONVERSATION_NOTES of <PREVIOUS_CONVERSATION_CONTEXT> for new key notes extracted information from CONVERSATION_TURN. PREVIOUS_CONVERSATION_CONTEXT CONVERSATION_NOTES must be keep intact.
 </INSTRUCTIONS>

 {conversation_context}

@@ -78,10 +85,13 @@ PRE_ANALYZE_WITH_CONTEXT_PROMPT = """

 <OUTPUT_FORMAT>
 <MEMORY>
-<
+<HEAD>[head]</HEAD>
 <DATE>[current_date]</DATE>
 <SUMMARY>[merged_summary]</SUMMARY>
 <CONTEXT>[merged_context]</CONTEXT>
+<INSIGHTS>
+<INSIGHT>[added_insight]</INSIGHT>
+</INSIGHTS>
 <ENTITIES>
 <ENTITY>
 <NAME>[added_entity_name]</NAME>

@@ -91,6 +101,9 @@ PRE_ANALYZE_WITH_CONTEXT_PROMPT = """
 <DOMAINS>
 <DOMAIN>[added_domain]</DOMAIN>
 </DOMAINS>
+<RESOURCES>
+</RESOURCE>[added_resource]</RESOURCE>
+</RESOURCES>
 <CONVERSATION_NOTES>
 <NOTE>[added_notes]</NOTE>
 </CONVERSATION_NOTES>
```
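Both prompt constants keep literal `{placeholder}` tokens, and the storage code in `chroma_service.py` fills them with chained `str.replace` rather than `str.format`, which would raise `KeyError` on any placeholder left for a later step (such as `{conversation_context}` in the context variant). A minimal rendering helper in the same style:

```python
from datetime import datetime

def render_pre_analyze(template: str, user_message: str, assistant_response: str) -> str:
    # Chained replace fills each placeholder independently and leaves any
    # unknown token untouched.
    return (
        template.replace(
            "{current_date}", datetime.today().strftime("%Y-%m-%d %H:%M:%S")
        )
        .replace("{user_message}", user_message)
        .replace("{assistant_response}", assistant_response)
    )
```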