agentcrew-ai 0.8.2__py3-none-any.whl → 0.8.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- AgentCrew/__init__.py +1 -1
- AgentCrew/main.py +3 -1
- AgentCrew/modules/a2a/agent_cards.py +8 -2
- AgentCrew/modules/a2a/errors.py +72 -0
- AgentCrew/modules/a2a/server.py +21 -2
- AgentCrew/modules/a2a/task_manager.py +180 -39
- AgentCrew/modules/agents/local_agent.py +11 -0
- AgentCrew/modules/browser_automation/element_extractor.py +4 -3
- AgentCrew/modules/browser_automation/js/draw_element_boxes.js +200 -0
- AgentCrew/modules/browser_automation/js/extract_clickable_elements.js +58 -26
- AgentCrew/modules/browser_automation/js/extract_elements_by_text.js +21 -19
- AgentCrew/modules/browser_automation/js/extract_input_elements.js +22 -23
- AgentCrew/modules/browser_automation/js/filter_hidden_elements.js +104 -0
- AgentCrew/modules/browser_automation/js/remove_element_boxes.js +29 -0
- AgentCrew/modules/browser_automation/js_loader.py +385 -92
- AgentCrew/modules/browser_automation/service.py +118 -347
- AgentCrew/modules/browser_automation/tool.py +28 -29
- AgentCrew/modules/chat/message/command_processor.py +7 -1
- AgentCrew/modules/chat/message/conversation.py +9 -8
- AgentCrew/modules/code_analysis/service.py +39 -0
- AgentCrew/modules/code_analysis/tool.py +10 -1
- AgentCrew/modules/console/command_handlers.py +186 -1
- AgentCrew/modules/console/completers.py +67 -0
- AgentCrew/modules/console/console_ui.py +59 -5
- AgentCrew/modules/console/display_handlers.py +12 -0
- AgentCrew/modules/console/input_handler.py +2 -0
- AgentCrew/modules/console/ui_effects.py +3 -4
- AgentCrew/modules/custom_llm/service.py +25 -3
- AgentCrew/modules/file_editing/tool.py +9 -11
- AgentCrew/modules/google/native_service.py +13 -0
- AgentCrew/modules/gui/widgets/message_bubble.py +1 -6
- AgentCrew/modules/llm/constants.py +38 -1
- AgentCrew/modules/llm/model_registry.py +9 -0
- AgentCrew/modules/llm/types.py +12 -1
- AgentCrew/modules/memory/base_service.py +2 -2
- AgentCrew/modules/memory/chroma_service.py +79 -138
- AgentCrew/modules/memory/context_persistent.py +10 -4
- AgentCrew/modules/memory/tool.py +17 -18
- AgentCrew/modules/openai/response_service.py +19 -11
- AgentCrew/modules/openai/service.py +15 -0
- AgentCrew/modules/prompts/constants.py +27 -14
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/METADATA +3 -3
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/RECORD +47 -43
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/WHEEL +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/entry_points.txt +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/licenses/LICENSE +0 -0
- {agentcrew_ai-0.8.2.dist-info → agentcrew_ai-0.8.4.dist-info}/top_level.txt +0 -0

AgentCrew/modules/console/console_ui.py
CHANGED

@@ -6,6 +6,7 @@ Refactored to use separate modules for different responsibilities.
 from __future__ import annotations
 import asyncio
 import time
+import sys
 import signal
 from typing import Any
 from rich.console import Console

@@ -421,11 +422,12 @@ class ConsoleUI(Observer):
 
         try:
             while True:
-                if
-
-
-
-
+                if sys.platform != "win32":
+                    if (
+                        not signal.getsignal(signal.SIGWINCH)
+                        or signal.getsignal(signal.SIGWINCH) == signal.SIG_DFL
+                    ):
+                        signal.signal(signal.SIGWINCH, self._handle_terminal_resize)
                 try:
                     # Get user input (now in separate thread)
                     self.input_handler.is_message_processing = False
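
The new guard registers the resize handler only on POSIX platforms, because signal.SIGWINCH does not exist on Windows, and only when no other handler has already claimed the signal. A standalone sketch of the same pattern (the handler body is illustrative):

    import signal
    import sys

    def handle_resize(signum, frame):
        # Redraw the prompt/UI after a terminal resize (illustrative body).
        pass

    if sys.platform != "win32":
        current = signal.getsignal(signal.SIGWINCH)
        # Install the handler only if the signal is unclaimed or at its default.
        if not current or current == signal.SIG_DFL:
            signal.signal(signal.SIGWINCH, handle_resize)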

@@ -544,6 +546,58 @@ class ConsoleUI(Observer):
                     self.command_handlers.handle_edit_config_command()
                     continue
 
+                # Handle list_behaviors command
+                elif user_input.strip() == "/list_behaviors":
+                    self.command_handlers.handle_list_behaviors_command()
+                    continue
+
+                # Handle update_behavior command
+                elif user_input.strip().startswith("/update_behavior "):
+                    args = user_input.strip()[17:].strip()
+                    if args:
+                        parts = args.split(maxsplit=2)
+                        if len(parts) == 3:
+                            scope, behavior_id, behavior_text = parts
+                            self.command_handlers.handle_update_behavior_command(
+                                behavior_id, behavior_text, scope
+                            )
+                        else:
+                            self.console.print(
+                                Text(
+                                    "Usage: /update_behavior <scope> <id> <behavior_text>\n"
+                                    "Example: /update_behavior project my_behavior_id when user asks about X, do provide detailed examples",
+                                    style=RICH_STYLE_YELLOW,
+                                )
+                            )
+                    else:
+                        self.console.print(
+                            Text(
+                                "Usage: /update_behavior <scope> <id> <behavior_text>\n"
+                                "Example: /update_behavior project my_behavior_id when user asks about X, do provide detailed examples",
+                                style=RICH_STYLE_YELLOW,
+                            )
+                        )
+                    continue
+
+                # Handle delete_behavior command
+                elif user_input.strip().startswith("/delete_behavior "):
+                    args = user_input.strip()[17:].strip()
+                    parts = args.split(maxsplit=1)
+                    if len(parts) == 2:
+                        scope, behavior_id = parts
+                        self.command_handlers.handle_delete_behavior_command(
+                            behavior_id, scope
+                        )
+                    else:
+                        self.console.print(
+                            Text(
+                                "Usage: /delete_behavior <scope> <id>\n"
+                                "Example: /delete_behavior <scope> my_behavior_id",
+                                style=RICH_STYLE_YELLOW,
+                            )
+                        )
+                    continue
+
                 elif user_input.startswith("/voice"):
                     self.input_handler._stop_input_thread()
                     self.voice_recording = True
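
The new commands lean on str.split(maxsplit=...), so everything after the scope and id survives verbatim as the behavior text. A quick illustration with a hypothetical input:

    args = "project my_behavior_id when user asks about X, do provide detailed examples"
    scope, behavior_id, behavior_text = args.split(maxsplit=2)
    assert scope == "project"
    assert behavior_id == "my_behavior_id"
    assert behavior_text == "when user asks about X, do provide detailed examples"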

AgentCrew/modules/console/display_handlers.py
CHANGED

@@ -350,6 +350,18 @@ class DisplayHandlers:
             "Use '/toggle_session_yolo' to toggle YOLO mode (auto-approval of tool calls) in this session only.",
             style=RICH_STYLE_YELLOW,
         ),
+        Text(
+            "Use '/list_behaviors' to list all adaptive behaviors (global and project-specific).",
+            style=RICH_STYLE_YELLOW,
+        ),
+        Text(
+            "Use '/update_behavior <scope> <id> <behavior>' to create or update an adaptive behavior (format: 'when..., do...').",
+            style=RICH_STYLE_YELLOW,
+        ),
+        Text(
+            "Use '/delete_behavior <scope> <id>' to delete an adaptive behavior.",
+            style=RICH_STYLE_YELLOW,
+        ),
         Text("Use '/list' to list saved conversations.", style=RICH_STYLE_YELLOW),
         Text(
             "Use '/load <id>' or '/load <number>' to load a conversation.",

AgentCrew/modules/console/input_handler.py
CHANGED

@@ -218,6 +218,7 @@ class InputHandler:
 
         @kb.add(Keys.ControlUp)
         @kb.add(Keys.Escape, Keys.Up)
+        @kb.add(Keys.ControlK)
         def _(event):
             """Navigate to previous history entry."""
             buffer = event.current_buffer

@@ -241,6 +242,7 @@ class InputHandler:
 
         @kb.add(Keys.ControlDown)
         @kb.add(Keys.Escape, Keys.Down)
+        @kb.add(Keys.ControlJ)
         def _(event):
             """Navigate to next history entry if cursor is at last line."""
             buffer = event.current_buffer
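
Ctrl+K and Ctrl+J now join the existing chords for history navigation; stacking several @kb.add decorators binds every chord to the same handler. A self-contained prompt_toolkit sketch of that pattern:

    from prompt_toolkit.key_binding import KeyBindings
    from prompt_toolkit.keys import Keys

    kb = KeyBindings()

    @kb.add(Keys.ControlUp)
    @kb.add(Keys.Escape, Keys.Up)
    @kb.add(Keys.ControlK)
    def _(event):
        # All three chords land in this one handler.
        event.current_buffer.history_backward()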

AgentCrew/modules/console/ui_effects.py
CHANGED

@@ -107,8 +107,7 @@ class UIEffects:
         self.live = Live(
             live_panel,
             console=self.console,
-            auto_refresh=
-            refresh_per_second=8,
+            auto_refresh=False,
             vertical_overflow="crop",
         )
         self.live.start()

@@ -167,10 +166,10 @@ class UIEffects:
             subtitle=subtitle,
             title_align="left",
             expand=False,
-            height=height_limit
+            height=min(height_limit, len(lines)),
             border_style=RICH_STYLE_GREEN,
         )
-        self.live.update(live_panel)
+        self.live.update(live_panel, refresh=True)
 
     def finish_live_update(self):
         """Stop the live update display."""
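
With auto_refresh=False, Rich's Live display stops repainting on a background timer, so every update has to request its own repaint; that is why the update call gains refresh=True. A minimal sketch of the pattern:

    from rich.live import Live
    from rich.panel import Panel

    live = Live(Panel("starting"), auto_refresh=False, vertical_overflow="crop")
    live.start()
    live.update(Panel("new content"), refresh=True)  # repaints here, not on a timer
    live.stop()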

AgentCrew/modules/custom_llm/service.py
CHANGED

@@ -121,16 +121,38 @@ class CustomLLMService(OpenAIService):
             # "max_tokens": 16000,
         }
         stream_params["temperature"] = self.temperature
-        stream_params["extra_body"] = {"min_p": 0.
+        stream_params["extra_body"] = {"min_p": 0.02}
 
+        full_model_id = f"{self._provider_name}/{self.model}"
+
+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                stream_params["temperature"] = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                stream_params["top_p"] = forced_sample_params.top_p
+            if forced_sample_params.top_k is not None:
+                stream_params["extra_body"]["top_k"] = forced_sample_params.top_k
+            if forced_sample_params.frequency_penalty is not None:
+                stream_params["frequency_penalty"] = (
+                    forced_sample_params.frequency_penalty
+                )
+            if forced_sample_params.presence_penalty is not None:
+                stream_params["presence_penalty"] = (
+                    forced_sample_params.presence_penalty
+                )
+            if forced_sample_params.repetition_penalty is not None:
+                stream_params["extra_body"]["repetition_penalty"] = (
+                    forced_sample_params.repetition_penalty
+                )
+            if forced_sample_params.min_p is not None:
+                stream_params["extra_body"]["min_p"] = forced_sample_params.min_p
         # Add system message if provided
         if self.system_prompt:
             stream_params["messages"] = self._convert_internal_format(
                 [{"role": "system", "content": self.system_prompt}] + messages
             )
 
-        full_model_id = f"{self._provider_name}/{self.model}"
-
         # Add tools if available
         if self.tools and "tool_use" in ModelRegistry.get_model_capabilities(
             full_model_id
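
Combined with the DeepInfra Qwen 3 Coder entry added in constants.py below (temperature=0.7, top_p=0.8, top_k=20, repetition_penalty=1.05), the override block would leave stream_params roughly as follows; top_k, repetition_penalty, and min_p travel via extra_body because the stock OpenAI client does not accept them as top-level arguments:

    stream_params = {
        "temperature": 0.7,  # forced, replaces self.temperature
        "top_p": 0.8,        # forced
        "extra_body": {
            "min_p": 0.02,   # default kept; this entry forces no min_p
            "top_k": 20,
            "repetition_penalty": 1.05,
        },
        # messages and tools are attached afterwards, as in the diff
    }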

AgentCrew/modules/file_editing/tool.py
CHANGED

@@ -115,13 +115,13 @@ def get_file_write_or_edit_tool_handler(
         text_or_search_replace_blocks = params.get("text_or_search_replace_blocks")
 
         if not file_path:
-            raise ValueError("
+            raise ValueError("Error: No file path provided.")
 
         if percentage_to_change is None:
-            raise ValueError("
+            raise ValueError("Error: No percentage_to_change provided.")
 
         if not text_or_search_replace_blocks:
-            raise ValueError("
+            raise ValueError("Error: No content or search/replace blocks provided.")
 
         result = file_editing_service.write_or_edit_file(
             file_path=file_path,

@@ -130,7 +130,7 @@ def get_file_write_or_edit_tool_handler(
         )
 
         if result["status"] == "success":
-            parts = [f"
+            parts = [f"{result['file_path']}"]
             parts.append(f"{result.get('changes_applied', 1)} change(s)")
             if result.get("syntax_check", {}).get("is_valid"):
                 parts.append(

@@ -153,21 +153,19 @@ def get_file_write_or_edit_tool_handler(
                 else ""
             )
             restore = " | Backup restored" if result.get("backup_restored") else ""
-            return (
-                f"❌ Syntax ({result.get('language', '?')}):\n{errors}{extra}{restore}"
-            )
+            return f"Syntax ({result.get('language', '?')}):\n{errors}{extra}{restore}"
 
         elif result["status"] in ["no_match", "ambiguous"]:
-            return f"
+            return f"{result['status'].title()}: {result.get('error', '?')} (block {result.get('block_index', '?')})"
 
         elif result["status"] == "denied":
-            return f"
+            return f"Access denied: {result.get('error', 'Permission error')}"
 
         elif result["status"] == "parse_error":
-            return f"
+            return f"Parse: {result.get('error', 'Invalid block format')}"
 
         else:
-            return f"
+            return f"{result.get('error', 'Unknown error')}"
 
     return handle_file_write_or_edit
 
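
The rewritten messages drop the emoji prefixes and map each failure status to a plain string. For a hypothetical no_match result, the new branch produces:

    result = {"status": "no_match", "error": "search block not found", "block_index": 2}
    message = f"{result['status'].title()}: {result.get('error', '?')} (block {result.get('block_index', '?')})"
    assert message == "No_Match: search block not found (block 2)"  # str.title() keeps the underscore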

AgentCrew/modules/google/native_service.py
CHANGED

@@ -393,6 +393,19 @@ class GoogleAINativeService(BaseLLMService):
             top_p=0.95,
         )
 
+        forced_sample_params = ModelRegistry.get_model_sample_params(full_model_id)
+        if forced_sample_params:
+            if forced_sample_params.temperature is not None:
+                config.temperature = forced_sample_params.temperature
+            if forced_sample_params.top_p is not None:
+                config.top_p = forced_sample_params.top_p
+            if forced_sample_params.top_k is not None:
+                config.top_k = forced_sample_params.top_k
+            if forced_sample_params.frequency_penalty is not None:
+                config.frequency_penalty = forced_sample_params.frequency_penalty
+            if forced_sample_params.presence_penalty is not None:
+                config.presence_penalty = forced_sample_params.presence_penalty
+
         # Add system instruction if available
         if self.system_prompt:
             config.system_instruction = self.system_prompt
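
This is the same override pattern as in the custom LLM service, applied to the google-genai request config; the fields are assigned directly, since the SDK's GenerateContentConfig exposes them as plain attributes (a sketch, assuming the google-genai SDK):

    from google.genai import types

    config = types.GenerateContentConfig(temperature=0.7, top_p=0.95)
    config.temperature = 1.0  # a forced value overrides the default set above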

AgentCrew/modules/gui/widgets/message_bubble.py
CHANGED

@@ -564,12 +564,7 @@ class MessageBubble(QFrame):
             return
 
         if self.streaming_text and self.streaming_text != self.message_label.text():
-
-            self.message_label.setText(
-                "[...]" + "".join(chunked_streaming_text[-60:])
-                if len(chunked_streaming_text) > 60
-                else self.streaming_text
-            )
+            self.message_label.setText(self.streaming_text)
 
     def _finalize_streaming(self):
         """Convert to formatted text once streaming is complete."""

AgentCrew/modules/llm/constants.py
CHANGED

@@ -1,4 +1,4 @@
-from .types import Model
+from .types import Model, SampleParam
 
 _ANTHROPIC_MODELS = [
     Model(

@@ -227,6 +227,17 @@ _GOOGLE_MODELS = [
         input_token_price_1m=1.25,
         output_token_price_1m=10,
     ),
+    Model(
+        id="gemini-3-pro-preview",
+        provider="google",
+        name="Gemini 3 Pro",
+        max_context_token=1_000_000,
+        description="Google's most intelligent model family to date, built on a foundation of state-of-the-art reasoning",
+        capabilities=["tool_use", "thinking", "vision", "structured_output"],
+        force_sample_params=SampleParam(temperature=1.0),
+        input_token_price_1m=2,
+        output_token_price_1m=12,
+    ),
 ]
 
 _DEEPINFRA_MODELS = [

@@ -254,6 +265,9 @@ _DEEPINFRA_MODELS = [
         name="Qwen 3 Coder",
         description="Qwen3-Coder-480B-A35B-Instruct is the Qwen3's most agentic code model",
         capabilities=["tool_use", "stream", "structured_output"],
+        force_sample_params=SampleParam(
+            temperature=0.7, top_p=0.8, top_k=20, repetition_penalty=1.05
+        ),
         input_token_price_1m=0.4,
         output_token_price_1m=1.6,
     ),

@@ -263,6 +277,9 @@ _DEEPINFRA_MODELS = [
         name="Qwen 3 Coder",
         description="Qwen3-Coder-480B-A35B-Instruct is the Qwen3's most agentic code model",
         capabilities=["tool_use", "stream", "structured_output"],
+        force_sample_params=SampleParam(
+            temperature=0.7, top_p=0.8, top_k=20, min_p=0.0
+        ),
         input_token_price_1m=0.14,
         output_token_price_1m=1.1,
     ),

@@ -272,6 +289,9 @@ _DEEPINFRA_MODELS = [
         name="Qwen 3 MoE 235B-22B",
         description="Qwen3 is the latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models",
         capabilities=["tool_use", "thinking", "stream", "structured_output"],
+        force_sample_params=SampleParam(
+            temperature=0.6, top_p=0.95, top_k=20, min_p=0.0
+        ),
         input_token_price_1m=0.2,
         output_token_price_1m=0.6,
     ),

@@ -280,6 +300,7 @@ _DEEPINFRA_MODELS = [
         provider="deepinfra",
         name="Zai GLM-4.6",
         description="The GLM-4.6 series models are foundation models designed for intelligent agents",
+        force_sample_params=SampleParam(temperature=1, top_p=0.95, top_k=40),
         capabilities=["tool_use", "stream", "structured_output"],
         input_token_price_1m=0.6,
         output_token_price_1m=2.0,

@@ -290,6 +311,9 @@ _DEEPINFRA_MODELS = [
         name="Qwen 3 32B",
         description="Qwen3 is the latest generation of large language models in Qwen series, offering a comprehensive suite of dense and mixture-of-experts (MoE) models",
         capabilities=["tool_use", "stream", "structured_output"],
+        force_sample_params=SampleParam(
+            temperature=0.6, top_p=0.95, top_k=20, min_p=0.0
+        ),
         input_token_price_1m=0.1,
         output_token_price_1m=0.3,
     ),

@@ -308,6 +332,7 @@ _DEEPINFRA_MODELS = [
         name="DeepSeek R1 0528",
         description="The DeepSeek R1 model has undergone a minor version upgrade, with the current version being DeepSeek-R1-0528.",
         capabilities=["tool_use", "thinking", "stream", "structured_output"],
+        force_sample_params=SampleParam(temperature=0.6),
         input_token_price_1m=0.5,
         output_token_price_1m=2.18,
     ),

@@ -317,6 +342,7 @@ _DEEPINFRA_MODELS = [
         name="Kimi K2 Instruct",
         description="Kimi K2 is a large-scale Mixture-of-Experts (MoE) language model developed by Moonshot AI, featuring 1 trillion total parameters with 32 billion active per forward pass",
         capabilities=["tool_use", "stream", "structured_output"],
+        force_sample_params=SampleParam(temperature=0.6),
         input_token_price_1m=0.5,
         output_token_price_1m=2.0,
     ),

@@ -342,6 +368,17 @@ _GITHUB_COPILOT_MODELS = [
         input_token_price_1m=0.0,
         output_token_price_1m=0.0,
     ),
+    Model(
+        id="gemini-3-pro-preview",
+        provider="github_copilot",
+        name="Gemini 3 Pro",
+        description="",
+        capabilities=["tool_use", "vision", "stream"],
+        default=False,
+        input_token_price_1m=0.0,
+        force_sample_params=SampleParam(temperature=1.0),
+        output_token_price_1m=0.0,
+    ),
     Model(
         id="gpt-4.1",
         provider="github_copilot",

AgentCrew/modules/llm/model_registry.py
CHANGED

@@ -46,6 +46,15 @@ class ModelRegistry:
             return 128_000
         return model.max_context_token
 
+    @classmethod
+    def get_model_sample_params(cls, mode_id):
+        registry = ModelRegistry.get_instance()
+        model = registry.get_model(mode_id)
+        if not model:
+            logger.warning(f"Model not found in registry: {mode_id}")
+            return None
+        return model.force_sample_params
+
     def _load_custom_models_from_config(self):
         """Loads models from custom LLM provider configurations and registers them."""
         try:
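
Provider services look up forced parameters by the provider-qualified id (full_model_id = f"{provider}/{model}" in the service code above). A usage sketch against the Gemini 3 Pro entry from constants.py:

    params = ModelRegistry.get_model_sample_params("google/gemini-3-pro-preview")
    if params is not None and params.temperature is not None:
        temperature = params.temperature  # 1.0 for this entry; unknown ids log a warning and return None
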
AgentCrew/modules/llm/types.py
CHANGED

@@ -1,5 +1,15 @@
 from pydantic import BaseModel
-from typing import List, Literal
+from typing import List, Literal, Optional
+
+
+class SampleParam(BaseModel):
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    min_p: Optional[float] = None
+    top_k: Optional[int] = None
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    repetition_penalty: Optional[float] = None
 
 
 class Model(BaseModel):

@@ -19,6 +29,7 @@ class Model(BaseModel):
         ]
     ]
     default: bool = False
+    force_sample_params: Optional[SampleParam] = None
     max_context_token: int = 128_000
     input_token_price_1m: float = 0.0
     output_token_price_1m: float = 0.0
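
Every SampleParam field defaults to None, so a model entry forces only the knobs it names, and consumers test each field individually, exactly as the service code above does. For instance:

    p = SampleParam(temperature=0.6, top_k=20)
    assert p.temperature == 0.6
    assert p.top_p is None  # unset knobs stay None and are left untouched downstream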

AgentCrew/modules/memory/base_service.py
CHANGED

@@ -50,7 +50,7 @@ class BaseMemoryService(ABC):
         pass
 
     @abstractmethod
-    def load_conversation_context(self, session_id: str):
+    def load_conversation_context(self, session_id: str, agent_name: str = "None"):
         pass
 
     @abstractmethod

@@ -88,7 +88,7 @@ class BaseMemoryService(ABC):
         pass
 
     @abstractmethod
-    def
+    def list_memory_headers(
         self,
         from_date: Optional[int] = None,
         to_date: Optional[int] = None,