devduck 0.7.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of devduck might be problematic.
- devduck/__init__.py +151 -67
- devduck/_version.py +2 -2
- devduck/tools/speech_to_speech.py +750 -0
- devduck-1.1.0.dist-info/METADATA +716 -0
- {devduck-0.7.0.dist-info → devduck-1.1.0.dist-info}/RECORD +9 -8
- {devduck-0.7.0.dist-info → devduck-1.1.0.dist-info}/entry_points.txt +1 -0
- devduck-0.7.0.dist-info/METADATA +0 -589
- {devduck-0.7.0.dist-info → devduck-1.1.0.dist-info}/WHEEL +0 -0
- {devduck-0.7.0.dist-info → devduck-1.1.0.dist-info}/licenses/LICENSE +0 -0
- {devduck-0.7.0.dist-info → devduck-1.1.0.dist-info}/top_level.txt +0 -0
devduck/__init__.py
CHANGED
@@ -20,7 +20,7 @@ from logging.handlers import RotatingFileHandler
 warnings.filterwarnings("ignore", message=".*pkg_resources is deprecated.*")
 warnings.filterwarnings("ignore", message=".*cache_prompt is deprecated.*")
 
-os.environ["BYPASS_TOOL_CONSENT"] = "true"
+os.environ["BYPASS_TOOL_CONSENT"] = os.getenv("BYPASS_TOOL_CONSENT", "true")
 os.environ["STRANDS_TOOL_CONSOLE_MODE"] = "enabled"
 os.environ["EDITOR_DISABLE_BACKUP"] = "true"
 
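The rewritten assignment respects a value the user has already exported instead of overwriting it. A minimal sketch of this read-then-default pattern, using a hypothetical FEATURE_FLAG variable rather than devduck's actual one:

    import os

    # Keep the user's value if one is set; otherwise fall back to the shipped default.
    os.environ["FEATURE_FLAG"] = os.getenv("FEATURE_FLAG", "true")

With this shape, launching the process with FEATURE_FLAG=false preserves the opt-out, whereas the old unconditional assignment always forced "true".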
@@ -189,18 +189,7 @@ def manage_tools_func(
     tool_names: str = None,
     tool_path: str = None,
 ) -> Dict[str, Any]:
-    """
-    Manage the agent's tool set at runtime using ToolRegistry.
-
-    Args:
-        action: Action to perform - "list", "add", "remove", "reload"
-        package: Package name to load tools from (e.g., "strands_tools", "strands_fun_tools")
-        tool_names: Comma-separated tool names (e.g., "shell,editor,calculator")
-        tool_path: Path to a .py file to load as a tool
-
-    Returns:
-        Dict with status and content
-    """
+    """Manage the agent's tool set at runtime - add, remove, list, reload tools on the fly."""
     try:
         if not hasattr(devduck, "agent") or not devduck.agent:
             return {"status": "error", "content": [{"text": "Agent not initialized"}]}
@@ -633,7 +622,7 @@ class DevDuck:
 
         # Load tools with flexible configuration
         # Default tool config - user can override with DEVDUCK_TOOLS env var
-        default_tools = "devduck.tools:system_prompt,store_in_kb,ipc,tcp,websocket,mcp_server,state_manager,tray,ambient,agentcore_config,agentcore_invoke,agentcore_logs,agentcore_agents,install_tools,create_subagent,use_github
+        default_tools = "devduck.tools:system_prompt,store_in_kb,ipc,tcp,websocket,mcp_server,state_manager,tray,ambient,agentcore_config,agentcore_invoke,agentcore_logs,agentcore_agents,install_tools,create_subagent,use_github;strands_tools:shell,editor,file_read,file_write,image_reader,load_tool,retrieve,calculator,use_agent,environment,mcp_client,speak,slack;strands_fun_tools:listen,cursor,clipboard,screen_reader,bluetooth,yolo_vision"
 
         tools_config = os.getenv("DEVDUCK_TOOLS", default_tools)
         logger.info(f"Loading tools from config: {tools_config}")
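The new default wires three tool packages into one semicolon-separated string; setting DEVDUCK_TOOLS replaces it wholesale rather than appending. A hedged sketch of a slimmed-down override, reusing the package:tool grammar from the diff (the particular selection below is illustrative):

    import os

    # Only the tools listed here will load - nothing from the default set.
    os.environ["DEVDUCK_TOOLS"] = (
        "devduck.tools:system_prompt,mcp_server;"
        "strands_tools:shell,editor"
    )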
@@ -657,7 +646,18 @@ class DevDuck:
         tool_names: str = None,
         tool_path: str = None,
     ) -> Dict[str, Any]:
-        """
+        """
+        Manage the agent's tool set at runtime using ToolRegistry.
+
+        Args:
+            action: Action to perform - "list", "add", "remove", "reload"
+            package: Package name to load tools from (e.g., "strands_tools", "strands_fun_tools") or "devduck.tools:speech_to_speech,system_prompt,..."
+            tool_names: Comma-separated tool names (e.g., "shell,editor,calculator")
+            tool_path: Path to a .py file to load as a tool
+
+        Returns:
+            Dict with status and content
+        """
         return manage_tools_func(action, package, tool_names, tool_path)
 
         # Add built-in tools to the toolset
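The restored docstring describes the runtime tool-management API. A usage sketch of the documented actions, assuming the module-level manage_tools_func signature shown earlier in this diff (the local file path is hypothetical):

    # Inspect the currently loaded tool set
    result = manage_tools_func(action="list")

    # Hot-load two tools from a package, then drop one again
    manage_tools_func(action="add", package="strands_tools", tool_names="shell,editor")
    manage_tools_func(action="remove", tool_names="editor")

    # Load a one-off tool from a local .py file
    manage_tools_func(action="add", tool_path="./my_tool.py")

Each call returns a dict of the {"status": ..., "content": [...]} shape used throughout the module.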
@@ -679,9 +679,9 @@ class DevDuck:
         self.agent_model, self.model = self._select_model()
 
         # Create agent with self-healing
-        # load_tools_from_directory controlled by DEVDUCK_LOAD_TOOLS_FROM_DIR (default:
+        # load_tools_from_directory controlled by DEVDUCK_LOAD_TOOLS_FROM_DIR (default: true)
         load_from_dir = (
-            os.getenv("DEVDUCK_LOAD_TOOLS_FROM_DIR", "
+            os.getenv("DEVDUCK_LOAD_TOOLS_FROM_DIR", "true").lower() == "true"
         )
 
         self.agent = Agent(
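The completed expression parses the variable case-insensitively, so only the literal string "true" (in any casing) enables directory loading. The same one-liner in isolation, with a hypothetical DEMO_FLAG:

    import os

    demo_flag = os.getenv("DEMO_FLAG", "true").lower() == "true"  # "TRUE", "True", "true" all enable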
@@ -715,39 +715,39 @@ class DevDuck:
         """
         Load tools based on DEVDUCK_TOOLS configuration.
 
-        Format:
-
+        Format: package1:tool1,tool2;package2:tool3,tool4
+        Examples:
+        - strands_tools:shell,editor;strands_action:use_github
+        - strands_action:use_github;strands_tools:shell,use_aws
 
         Note: Only loads what's specified in config - no automatic additions
         """
         tools = []
-        … (22 lines of the previous implementation, truncated by the diff viewer)
-            tool = self._load_single_tool(
+
+        # Split by semicolon to get package groups
+        groups = config.split(";")
+
+        for group in groups:
+            group = group.strip()
+            if not group:
+                continue
+
+            # Split by colon to get package:tools
+            parts = group.split(":", 1)
+            if len(parts) != 2:
+                logger.warning(f"Invalid format: {group}")
+                continue
+
+            package = parts[0].strip()
+            tools_str = parts[1].strip()
+
+            # Parse tools (comma-separated)
+            tool_names = [t.strip() for t in tools_str.split(",") if t.strip()]
+
+            for tool_name in tool_names:
+                tool = self._load_single_tool(package, tool_name)
                 if tool:
                     tools.append(tool)
-            else:
-                logger.warning(f"Skipping segment '{segment}' - no package set")
 
         logger.info(f"Loaded {len(tools)} tools from configuration")
         return tools
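The new parser is small enough to exercise on its own. A self-contained sketch of the same semicolon/colon/comma grammar that returns (package, tool) pairs instead of loading anything:

    def parse_tools_config(config: str) -> list[tuple[str, str]]:
        """Parse 'pkg1:tool1,tool2;pkg2:tool3' into (package, tool) pairs."""
        pairs = []
        for group in config.split(";"):
            group = group.strip()
            if not group:
                continue
            parts = group.split(":", 1)
            if len(parts) != 2:
                continue  # malformed group - mirrors the logger.warning branch above
            package, tools_str = parts[0].strip(), parts[1].strip()
            for tool in (t.strip() for t in tools_str.split(",")):
                if tool:
                    pairs.append((package, tool))
        return pairs

    assert parse_tools_config("a:x,y;b:z") == [("a", "x"), ("a", "y"), ("b", "z")]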
@@ -863,25 +863,83 @@ class DevDuck:
 
     def _select_model(self):
         """
-        Smart model selection with fallback
+        Smart model selection with fallback based on available credentials.
+
+        Priority: Bedrock → Anthropic → OpenAI → GitHub → Gemini → Cohere →
+                  Writer → Mistral → LiteLLM → LlamaAPI → SageMaker →
+                  LlamaCpp → MLX → Ollama
 
         Returns:
             Tuple of (model_instance, model_name)
         """
         provider = os.getenv("MODEL_PROVIDER")
 
+        # Read common model parameters from environment
+        max_tokens = int(os.getenv("STRANDS_MAX_TOKENS", "60000"))
+        temperature = float(os.getenv("STRANDS_TEMPERATURE", "1.0"))
+
         if not provider:
-            # Auto-detect
+            # Auto-detect based on API keys and credentials
+            # 1. Try Bedrock (AWS bearer token or STS credentials)
             try:
-                #
-
+                # Check for bearer token first
+                if os.getenv("AWS_BEARER_TOKEN_BEDROCK"):
+                    provider = "bedrock"
+                    print("🦆 Using Bedrock (bearer token)")
+                else:
+                    # Try STS credentials
+                    import boto3
 
-
-
-
+                    boto3.client("sts").get_caller_identity()
+                    provider = "bedrock"
+                    print("🦆 Using Bedrock")
             except:
-                # Try
-                if
+                # 2. Try Anthropic
+                if os.getenv("ANTHROPIC_API_KEY"):
+                    provider = "anthropic"
+                    print("🦆 Using Anthropic")
+                # 3. Try OpenAI
+                elif os.getenv("OPENAI_API_KEY"):
+                    provider = "openai"
+                    print("🦆 Using OpenAI")
+                # 4. Try GitHub Models
+                elif os.getenv("GITHUB_TOKEN") or os.getenv("PAT_TOKEN"):
+                    provider = "github"
+                    print("🦆 Using GitHub Models")
+                # 5. Try Gemini
+                elif os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY"):
+                    provider = "gemini"
+                    print("🦆 Using Gemini")
+                # 6. Try Cohere
+                elif os.getenv("COHERE_API_KEY"):
+                    provider = "cohere"
+                    print("🦆 Using Cohere")
+                # 7. Try Writer
+                elif os.getenv("WRITER_API_KEY"):
+                    provider = "writer"
+                    print("🦆 Using Writer")
+                # 8. Try Mistral
+                elif os.getenv("MISTRAL_API_KEY"):
+                    provider = "mistral"
+                    print("🦆 Using Mistral")
+                # 9. Try LiteLLM
+                elif os.getenv("LITELLM_API_KEY"):
+                    provider = "litellm"
+                    print("🦆 Using LiteLLM")
+                # 10. Try LlamaAPI
+                elif os.getenv("LLAMAAPI_API_KEY"):
+                    provider = "llamaapi"
+                    print("🦆 Using LlamaAPI")
+                # 11. Try SageMaker
+                elif os.getenv("SAGEMAKER_ENDPOINT_NAME"):
+                    provider = "sagemaker"
+                    print("🦆 Using SageMaker")
+                # 12. Try LlamaCpp
+                elif os.getenv("LLAMACPP_MODEL_PATH"):
+                    provider = "llamacpp"
+                    print("🦆 Using LlamaCpp")
+                # 13. Try MLX on Apple Silicon
+                elif platform.system() == "Darwin" and platform.machine() in [
                     "arm64",
                     "aarch64",
                 ]:
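Steps 2 through 12 of the ladder reduce to an ordered first-match lookup from environment variable to provider name. An equivalent data-driven sketch (alternate keys such as PAT_TOKEN and GEMINI_API_KEY are omitted here for brevity):

    import os

    # Order mirrors the auto-detect chain above; the first key present wins.
    PROVIDER_KEYS = [
        ("ANTHROPIC_API_KEY", "anthropic"),
        ("OPENAI_API_KEY", "openai"),
        ("GITHUB_TOKEN", "github"),
        ("GOOGLE_API_KEY", "gemini"),
        ("COHERE_API_KEY", "cohere"),
        ("WRITER_API_KEY", "writer"),
        ("MISTRAL_API_KEY", "mistral"),
        ("LITELLM_API_KEY", "litellm"),
        ("LLAMAAPI_API_KEY", "llamaapi"),
        ("SAGEMAKER_ENDPOINT_NAME", "sagemaker"),
        ("LLAMACPP_MODEL_PATH", "llamacpp"),
    ]

    provider = next((name for key, name in PROVIDER_KEYS if os.getenv(key)), "ollama")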
@@ -889,48 +947,72 @@ class DevDuck:
                         from strands_mlx import MLXModel
 
                         provider = "mlx"
-                        print("🦆 Using MLX")
+                        print("🦆 Using MLX (Apple Silicon)")
                     except ImportError:
                         provider = "ollama"
-                        print("🦆 Using Ollama")
+                        print("🦆 Using Ollama (fallback)")
+                # 14. Fallback to Ollama
                 else:
                     provider = "ollama"
-                    print("🦆 Using Ollama")
+                    print("🦆 Using Ollama (fallback)")
 
         # Create model based on provider
         if provider == "mlx":
             from strands_mlx import MLXModel
 
-            model_name = "mlx-community/Qwen3-1.7B-4bit"
-            return
+            model_name = os.getenv("STRANDS_MODEL_ID", "mlx-community/Qwen3-1.7B-4bit")
+            return (
+                MLXModel(
+                    model_id=model_name,
+                    params={"temperature": temperature, "max_tokens": max_tokens},
+                ),
+                model_name,
+            )
+
+        elif provider == "gemini":
+            from strands.models.gemini import GeminiModel
+
+            model_name = os.getenv("STRANDS_MODEL_ID", "gemini-2.5-flash")
+            api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
+            return (
+                GeminiModel(
+                    client_args={"api_key": api_key},
+                    model_id=model_name,
+                    params={"temperature": temperature, "max_tokens": max_tokens},
+                ),
+                model_name,
+            )
 
         elif provider == "ollama":
             from strands.models.ollama import OllamaModel
 
+            # Smart model selection based on OS
             os_type = platform.system()
             if os_type == "Darwin":
-                model_name = "qwen3:1.7b"
+                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:1.7b")
             elif os_type == "Linux":
-                model_name = "qwen3:30b"
+                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:30b")
             else:
-                model_name = "qwen3:8b"
+                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:8b")
 
             return (
                 OllamaModel(
-                    host="http://localhost:11434",
+                    host=os.getenv("OLLAMA_HOST", "http://localhost:11434"),
                     model_id=model_name,
-                    temperature=
+                    temperature=temperature,
+                    num_predict=max_tokens,
                     keep_alive="5m",
                 ),
                 model_name,
            )
 
         else:
-            #
+            # All other providers via create_model utility
+            # Supports: bedrock, anthropic, openai, github, cohere, writer, mistral, litellm
             from strands_tools.utils.models.model import create_model
 
             model = create_model(provider=provider)
-            model_name = os.getenv("STRANDS_MODEL_ID",
+            model_name = os.getenv("STRANDS_MODEL_ID", provider)
             return model, model_name
 
     def _build_system_prompt(self):
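Every hard-coded constant in the old branch now routes through an environment variable. A sketch of how the knobs compose for the Ollama case, using only the defaults visible in the diff:

    import os
    import platform

    max_tokens = int(os.getenv("STRANDS_MAX_TOKENS", "60000"))
    temperature = float(os.getenv("STRANDS_TEMPERATURE", "1.0"))
    host = os.getenv("OLLAMA_HOST", "http://localhost:11434")

    # Per-OS default model, still overridable via STRANDS_MODEL_ID
    os_default = {"Darwin": "qwen3:1.7b", "Linux": "qwen3:30b"}.get(
        platform.system(), "qwen3:8b"
    )
    model_name = os.getenv("STRANDS_MODEL_ID", os_default)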
@@ -1012,9 +1094,11 @@ You have full access to your own source code for self-awareness and self-modification
 
 ## Tool Configuration:
 Set DEVDUCK_TOOLS for custom tools:
-- Format:
-- Example: strands_tools:shell,editor
+- Format: package1:tool1,tool2;package2:tool3,tool4
+- Example: strands_tools:shell,editor;strands_fun_tools:clipboard
 - Tools are filtered - only specified tools are loaded
+- Load the speech_to_speech tool when it's needed
+- Offload the tools when you don't need
 
 ## MCP Integration:
 - **Expose as MCP Server** - Use mcp_server() to expose devduck via MCP protocol
devduck/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '
-__version_tuple__ = version_tuple = (
+__version__ = version = '1.1.0'
+__version_tuple__ = version_tuple = (1, 1, 0)
 
 __commit_id__ = commit_id = None