kader 1.0.0.tar.gz → 1.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {kader-1.0.0 → kader-1.1.0}/PKG-INFO +2 -1
- {kader-1.0.0 → kader-1.1.0}/cli/app.py +10 -6
- kader-1.1.0/cli/llm_factory.py +165 -0
- {kader-1.0.0 → kader-1.1.0}/cli/utils.py +19 -11
- kader-1.1.0/examples/google_example.py +331 -0
- {kader-1.0.0 → kader-1.1.0}/kader/agent/base.py +16 -2
- {kader-1.0.0 → kader-1.1.0}/kader/config.py +10 -2
- {kader-1.0.0 → kader-1.1.0}/kader/providers/__init__.py +2 -0
- kader-1.1.0/kader/providers/google.py +690 -0
- {kader-1.0.0 → kader-1.1.0}/pyproject.toml +2 -1
- kader-1.1.0/tests/providers/test_google.py +505 -0
- {kader-1.0.0 → kader-1.1.0}/uv.lock +274 -1
- {kader-1.0.0 → kader-1.1.0}/.github/workflows/ci.yml +0 -0
- {kader-1.0.0 → kader-1.1.0}/.github/workflows/release.yml +0 -0
- {kader-1.0.0 → kader-1.1.0}/.gitignore +0 -0
- {kader-1.0.0 → kader-1.1.0}/.python-version +0 -0
- {kader-1.0.0 → kader-1.1.0}/.qwen/QWEN.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/.qwen/agents/technical-writer.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/.qwen/agents/test-automation-specialist.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/README.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/README.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/__main__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/app.tcss +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/widgets/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/widgets/confirmation.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/widgets/conversation.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/cli/widgets/loading.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/.gitignore +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/README.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/memory_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/ollama_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/planner_executor_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/planning_agent_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/python_developer/main.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/python_developer/template.yaml +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/react_agent_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/simple_agent.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/todo_agent/main.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/examples/tools_example.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/agent/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/agent/agents.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/agent/logger.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/memory/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/memory/conversation.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/memory/session.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/memory/state.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/memory/types.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/agent_prompts.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/templates/executor_agent.j2 +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/templates/kader_planner.j2 +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/templates/planning_agent.j2 +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/prompts/templates/react_agent.j2 +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/providers/base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/providers/mock.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/providers/ollama.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/README.md +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/agent.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/exec_commands.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/filesys.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/filesystem.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/protocol.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/rag.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/todo.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/utils.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/tools/web.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/utils/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/utils/checkpointer.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/utils/context_aggregator.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/workflows/__init__.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/workflows/base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/kader/workflows/planner_executor.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/conftest.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/providers/test_mock.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/providers/test_ollama.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/providers/test_providers_base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/test_agent_logger.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/test_agent_logger_integration.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/test_base_agent.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/test_file_memory.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/test_todo_tool.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_agent_tool.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_agent_tool_persistence.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_exec_commands.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_filesys_tools.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_filesystem_tools.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_rag.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_tools_base.py +0 -0
- {kader-1.0.0 → kader-1.1.0}/tests/tools/test_web.py +0 -0
**{kader-1.0.0 → kader-1.1.0}/PKG-INFO**

```diff
@@ -1,10 +1,11 @@
 Metadata-Version: 2.4
 Name: kader
-Version: 1.0.0
+Version: 1.1.0
 Summary: kader coding agent
 Requires-Python: >=3.11
 Requires-Dist: aiofiles>=25.1.0
 Requires-Dist: faiss-cpu>=1.9.0
+Requires-Dist: google-genai>=1.61.0
 Requires-Dist: jinja2>=3.1.6
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: ollama>=0.6.1
```
**{kader-1.0.0 → kader-1.1.0}/cli/app.py**

```diff
@@ -26,6 +26,7 @@ from kader.memory import (
 )
 from kader.workflows import PlannerExecutorWorkflow
 
+from .llm_factory import LLMProviderFactory
 from .utils import (
     DEFAULT_MODEL,
     HELP_TEXT,
@@ -114,9 +115,13 @@ class KaderApp(App):
 
     def _create_workflow(self, model_name: str) -> PlannerExecutorWorkflow:
         """Create a new PlannerExecutorWorkflow with the specified model."""
+        # Create provider using factory (supports provider:model format)
+        provider = LLMProviderFactory.create_provider(model_name)
+
         return PlannerExecutorWorkflow(
             name="kader_cli",
-            …
+            provider=provider,
+            model_name=model_name,  # Keep for reference
             interrupt_before_tool=True,
             tool_confirmation_callback=self._tool_confirmation_callback,
             direct_execution_callback=self._direct_execution_callback,
@@ -268,13 +273,12 @@ class KaderApp(App):
 
     async def _show_model_selector(self, conversation: ConversationView) -> None:
         """Show the model selector widget."""
-        from kader.providers import OllamaProvider
-
         try:
-            models …
+            # Get models from all available providers
+            models = LLMProviderFactory.get_flat_model_list()
             if not models:
                 conversation.add_message(
-                    "## Models (^^)\n\n*No models found. …
+                    "## Models (^^)\n\n*No models found. Check provider configurations.*",
                     "assistant",
                 )
                 return
@@ -569,7 +573,7 @@ Please resize your terminal."""
 
         except Exception as e:
             spinner.stop()
-            error_msg = f"(-) **Error:** {str(e)}\n\nMake sure …
+            error_msg = f"(-) **Error:** {str(e)}\n\nMake sure the provider for `{self._current_model}` is configured and available."
             conversation.add_message(error_msg, "assistant")
             self.notify(f"Error: {e}", severity="error")
```
**kader-1.1.0/cli/llm_factory.py** (new file)

```diff
@@ -0,0 +1,165 @@
+"""LLM Provider Factory for Kader CLI.
+
+Factory pattern implementation for creating LLM provider instances
+with automatic provider detection based on model name format.
+"""
+
+from typing import Optional
+
+from kader.providers import GoogleProvider, OllamaProvider
+from kader.providers.base import BaseLLMProvider, ModelConfig
+
+
+class LLMProviderFactory:
+    """
+    Factory for creating LLM provider instances.
+
+    Supports multiple providers with automatic detection based on model name format.
+    Model names can be specified as:
+    - "provider:model" (e.g., "google:gemini-2.5-flash", "ollama:kimi-k2.5:cloud")
+    - "model" (defaults to Ollama for backward compatibility)
+
+    Example:
+        factory = LLMProviderFactory()
+        provider = factory.create_provider("google:gemini-2.5-flash")
+
+        # Or with default provider (Ollama)
+        provider = factory.create_provider("kimi-k2.5:cloud")
+    """
+
+    # Registered provider classes
+    PROVIDERS: dict[str, type[BaseLLMProvider]] = {
+        "ollama": OllamaProvider,
+        "google": GoogleProvider,
+    }
+
+    # Default provider when no prefix is specified
+    DEFAULT_PROVIDER = "ollama"
+
+    @classmethod
+    def parse_model_name(cls, model_string: str) -> tuple[str, str]:
+        """
+        Parse model string to extract provider and model name.
+
+        Args:
+            model_string: Model string in format "provider:model" or just "model"
+
+        Returns:
+            Tuple of (provider_name, model_name)
+        """
+        # Check if the string starts with a known provider prefix
+        for provider_name in cls.PROVIDERS.keys():
+            prefix = f"{provider_name}:"
+            if model_string.lower().startswith(prefix):
+                return provider_name, model_string[len(prefix) :]
+
+        # No known provider prefix found, use default
+        return cls.DEFAULT_PROVIDER, model_string
+
+    @classmethod
+    def create_provider(
+        cls,
+        model_string: str,
+        config: Optional[ModelConfig] = None,
+    ) -> BaseLLMProvider:
+        """
+        Create an LLM provider instance.
+
+        Args:
+            model_string: Model identifier (e.g., "google:gemini-2.5-flash" or "kimi-k2.5:cloud")
+            config: Optional model configuration
+
+        Returns:
+            Configured provider instance
+
+        Raises:
+            ValueError: If provider is not supported
+        """
+        provider_name, model_name = cls.parse_model_name(model_string)
+
+        provider_class = cls.PROVIDERS.get(provider_name)
+        if not provider_class:
+            supported = ", ".join(cls.PROVIDERS.keys())
+            raise ValueError(
+                f"Unknown provider: {provider_name}. Supported: {supported}"
+            )
+
+        return provider_class(model=model_name, default_config=config)
+
+    @classmethod
+    def get_all_models(cls) -> dict[str, list[str]]:
+        """
+        Get all available models from all registered providers.
+
+        Returns:
+            Dictionary mapping provider names to their available models
+            (with provider prefix included in model names)
+        """
+        models: dict[str, list[str]] = {}
+
+        # Get Ollama models
+        try:
+            ollama_models = OllamaProvider.get_supported_models()
+            models["ollama"] = [f"ollama:{m}" for m in ollama_models]
+        except Exception:
+            models["ollama"] = []
+
+        # Get Google models
+        try:
+            google_models = GoogleProvider.get_supported_models()
+            models["google"] = [f"google:{m}" for m in google_models]
+        except Exception:
+            models["google"] = []
+
+        return models
+
+    @classmethod
+    def get_flat_model_list(cls) -> list[str]:
+        """
+        Get a flattened list of all available models with provider prefixes.
+
+        Returns:
+            List of model strings in "provider:model" format
+        """
+        all_models = cls.get_all_models()
+        flat_list: list[str] = []
+        for models in all_models.values():
+            flat_list.extend(models)
+        return flat_list
+
+    @classmethod
+    def is_provider_available(cls, provider_name: str) -> bool:
+        """
+        Check if a provider is available and configured.
+
+        Args:
+            provider_name: Name of the provider to check
+
+        Returns:
+            True if provider is available and has models, False otherwise
+        """
+        provider_name = provider_name.lower()
+        if provider_name not in cls.PROVIDERS:
+            return False
+
+        # Try to get models to verify provider is working
+        try:
+            provider_class = cls.PROVIDERS[provider_name]
+            models = provider_class.get_supported_models()
+            return len(models) > 0
+        except Exception:
+            return False
+
+    @classmethod
+    def get_provider_name(cls, model_string: str) -> str:
+        """
+        Get the provider name for a given model string.
+
+        Args:
+            model_string: Model string in format "provider:model" or just "model"
+
+        Returns:
+            Provider name (e.g., "ollama", "google")
+        """
+        provider_name, _ = cls.parse_model_name(model_string)
+        return provider_name
```
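For orientation, a short usage sketch of the new factory, based on the code above (the import path assumes running from the repository root; `create_provider` additionally needs the corresponding backend or API key to be configured):

```python
from cli.llm_factory import LLMProviderFactory

# Only registered provider names ("ollama", "google") are treated as prefixes,
# so Ollama model tags that themselves contain ":" still parse correctly.
assert LLMProviderFactory.parse_model_name("google:gemini-2.5-flash") == (
    "google",
    "gemini-2.5-flash",
)
assert LLMProviderFactory.parse_model_name("kimi-k2.5:cloud") == (
    "ollama",
    "kimi-k2.5:cloud",
)

# Resolve a provider instance from a model string
provider = LLMProviderFactory.create_provider("google:gemini-2.5-flash")
```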
**{kader-1.0.0 → kader-1.1.0}/cli/utils.py**

```diff
@@ -1,9 +1,9 @@
 """Utility constants and helpers for Kader CLI."""
 
-from …
+from .llm_factory import LLMProviderFactory
 
-# Default model
-DEFAULT_MODEL = "kimi-k2.5:cloud"
+# Default model (with provider prefix for clarity)
+DEFAULT_MODEL = "ollama:kimi-k2.5:cloud"
 
 HELP_TEXT = """## Kader CLI Commands
 
@@ -40,24 +40,32 @@ HELP_TEXT = """## Kader CLI Commands
 ### Tips:
 - Type any question to chat with the AI
 - Use **Tab** to navigate between panels
+- Model format: `provider:model` (e.g., `google:gemini-2.5-flash`)
 """
 
 
 def get_models_text() -> str:
-    """Get formatted text of available …
+    """Get formatted text of available models from all providers."""
     try:
-        …
-        …
-        …
+        all_models = LLMProviderFactory.get_all_models()
+        flat_list = LLMProviderFactory.get_flat_model_list()
+
+        if not flat_list:
+            return "## Available Models (^^)\n\n*No models found. Check provider configurations.*"
 
         lines = [
             "## Available Models (^^)\n",
-            "| Model | Status |",
-            "…
+            "| Provider | Model | Status |",
+            "|----------|-------|--------|",
         ]
-        for …
-        …
+        for provider_name, provider_models in all_models.items():
+            for model in provider_models:
+                lines.append(f"| {provider_name.title()} | `{model}` | (+) Available |")
+
         lines.append(f"\n*Currently using: **{DEFAULT_MODEL}***")
+        lines.append(
+            "\n> (!) Tip: Use `provider:model` format (e.g., `google:gemini-2.5-flash`)"
+        )
         return "\n".join(lines)
     except Exception as e:
         return f"## Available Models (^^)\n\n*Error fetching models: {e}*"
```
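With both providers reachable, `get_models_text()` now renders a provider-aware table roughly like the following (illustrative output; the actual rows depend on what each backend reports):

```
## Available Models (^^)

| Provider | Model | Status |
|----------|-------|--------|
| Ollama | `ollama:kimi-k2.5:cloud` | (+) Available |
| Google | `google:gemini-2.5-flash` | (+) Available |

*Currently using: **ollama:kimi-k2.5:cloud***

> (!) Tip: Use `provider:model` format (e.g., `google:gemini-2.5-flash`)
```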
**kader-1.1.0/examples/google_example.py** (new file)

```diff
@@ -0,0 +1,331 @@
+"""
+Google Provider Example
+
+Demonstrates how to use the Kader Google provider for:
+- Basic LLM invocation
+- Streaming responses
+- Asynchronous operations
+- Configuration options
+- Tool/function calling
+- Dynamic model listing
+
+API Key Setup:
+    Set your GEMINI_API_KEY in ~/.kader/.env:
+    GEMINI_API_KEY='your-api-key-here'
+"""
+
+import asyncio
+import os
+import sys
+
+# Add project root to path for direct execution
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
+from kader.providers.base import Message, ModelConfig
+from kader.providers.google import GoogleProvider
+
+
+def demo_basic_invocation():
+    """Demonstrate basic synchronous invocation."""
+    print("\n=== Basic Google Invocation Demo ===")
+
+    # Initialize the provider with a model
+    # Note: Set GEMINI_API_KEY or GOOGLE_API_KEY environment variable
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    # Create a simple conversation
+    messages = [
+        Message.system("You are a helpful assistant that responds concisely."),
+        Message.user("What are the benefits of using Google Gemini models?"),
+    ]
+
+    try:
+        # Invoke the model synchronously
+        response = provider.invoke(messages)
+
+        print(f"Model: {response.model}")
+        print(f"Content: {response.content}")
+        print(f"Prompt tokens: {response.usage.prompt_tokens}")
+        print(f"Completion tokens: {response.usage.completion_tokens}")
+        print(f"Total tokens: {response.usage.total_tokens}")
+        print(f"Finish reason: {response.finish_reason}")
+
+        # Show cost tracking
+        if response.cost:
+            print(f"Cost: {response.cost.format()}")
+
+        # Show total usage tracking
+        print(f"Total usage tracked: {provider.total_usage.total_tokens} tokens")
+
+    except Exception as e:
+        print(f"Error during invocation: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_streaming():
+    """Demonstrate streaming responses."""
+    print("\n=== Google Streaming Demo ===")
+
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    messages = [Message.user("Write a short poem about artificial intelligence.")]
+
+    try:
+        print("Streaming response:")
+        full_content = ""
+        for chunk in provider.stream(messages):
+            if chunk.delta:
+                print(chunk.delta, end="", flush=True)
+            full_content = chunk.content
+
+        print(f"\n\nFinal content length: {len(full_content)} characters")
+        print(f"Total usage: {provider.total_usage.total_tokens} tokens")
+
+    except Exception as e:
+        print(f"Error during streaming: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_async_invocation():
+    """Demonstrate asynchronous invocation."""
+    print("\n=== Google Async Invocation Demo ===")
+
+    async def async_demo():
+        provider = GoogleProvider(model="gemini-2.5-flash")
+
+        messages = [
+            Message.system("You are a helpful assistant."),
+            Message.user(
+                "What is the difference between Gemini 2.5 Flash and Pro models?"
+            ),
+        ]
+
+        try:
+            # Asynchronously invoke the model
+            response = await provider.ainvoke(messages)
+
+            print(f"Model: {response.model}")
+            print(f"Content: {response.content}")
+            print(f"Tokens: {response.usage.total_tokens}")
+            print(f"Finish reason: {response.finish_reason}")
+
+        except Exception as e:
+            print(f"Error during async invocation: {e}")
+            print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+    asyncio.run(async_demo())
+
+
+def demo_async_streaming():
+    """Demonstrate asynchronous streaming."""
+    print("\n=== Google Async Streaming Demo ===")
+
+    async def async_stream_demo():
+        provider = GoogleProvider(model="gemini-2.5-flash")
+
+        messages = [Message.user("Explain quantum computing in simple terms.")]
+
+        try:
+            print("Async streaming response:")
+            full_content = ""
+            async for chunk in provider.astream(messages):
+                if chunk.delta:
+                    print(chunk.delta, end="", flush=True)
+                full_content = chunk.content
+
+            print(f"\n\nFinal content length: {len(full_content)} characters")
+
+        except Exception as e:
+            print(f"Error during async streaming: {e}")
+            print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+    asyncio.run(async_stream_demo())
+
+
+def demo_configuration():
+    """Demonstrate using different configurations."""
+    print("\n=== Google Configuration Demo ===")
+
+    # Create a provider with default configuration
+    default_config = ModelConfig(
+        temperature=0.7,  # More creative
+        max_tokens=150,  # Limit response length
+        top_p=0.9,
+    )
+
+    provider = GoogleProvider(model="gemini-2.5-flash", default_config=default_config)
+
+    messages = [Message.user("Tell me a creative fact about space.")]
+
+    try:
+        # This will use the default configuration
+        response = provider.invoke(messages)
+        print(f"Using default config - Content: {response.content[:100]}...")
+        print(f"Tokens: {response.usage.total_tokens}")
+
+        # Override configuration for this specific call
+        creative_config = ModelConfig(
+            temperature=1.2,  # Even more creative/random
+            max_tokens=200,
+        )
+
+        messages = [Message.user("Generate an original haiku about technology.")]
+        response = provider.invoke(messages, config=creative_config)
+        print(f"\nUsing creative config - Content: {response.content}")
+
+    except Exception as e:
+        print(f"Error during configuration demo: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_conversation_history():
+    """Demonstrate maintaining conversation context."""
+    print("\n=== Google Conversation History Demo ===")
+
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    # Simulate a multi-turn conversation
+    conversation = [
+        Message.system("You are a helpful coding assistant."),
+        Message.user("What is Python used for?"),
+        Message.assistant(
+            "Python is a versatile programming language used for web development, data science, AI/ML, automation, and more."
+        ),
+        Message.user("Can you give me a simple Python example?"),
+    ]
+
+    try:
+        response = provider.invoke(conversation)
+        print(f"Response to follow-up: {response.content}")
+        print(f"Tokens used: {response.usage.total_tokens}")
+
+    except Exception as e:
+        print(f"Error during conversation demo: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_list_models():
+    """Demonstrate listing available models dynamically."""
+    print("\n=== Google List Models Demo ===")
+
+    try:
+        # Get supported models without creating a provider instance
+        models = GoogleProvider.get_supported_models()
+
+        print(f"Found {len(models)} Gemini models:")
+        for model in models[:10]:  # Show first 10
+            print(f"  - {model}")
+
+        if len(models) > 10:
+            print(f"  ... and {len(models) - 10} more")
+
+    except Exception as e:
+        print(f"Error listing models: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_model_info():
+    """Demonstrate getting model information."""
+    print("\n=== Google Model Info Demo ===")
+
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    try:
+        model_info = provider.get_model_info()
+
+        if model_info:
+            print(f"Model Name: {model_info.name}")
+            print(f"Provider: {model_info.provider}")
+            print(f"Context Window: {model_info.context_window:,} tokens")
+            print(f"Max Output Tokens: {model_info.max_output_tokens}")
+            print(f"Supports Tools: {model_info.supports_tools}")
+            print(f"Supports Streaming: {model_info.supports_streaming}")
+            print(f"Supports Vision: {model_info.supports_vision}")
+        else:
+            print("Could not retrieve model info.")
+
+    except Exception as e:
+        print(f"Error getting model info: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_token_counting():
+    """Demonstrate token counting."""
+    print("\n=== Google Token Counting Demo ===")
+
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    try:
+        # Count tokens in a string
+        text = "Hello, how are you today? I'm looking forward to our conversation."
+        token_count = provider.count_tokens(text)
+        print(f"Text: '{text}'")
+        print(f"Token count: {token_count}")
+
+        # Count tokens in messages
+        messages = [
+            Message.system("You are a helpful assistant."),
+            Message.user("What is the meaning of life?"),
+        ]
+        msg_token_count = provider.count_tokens(messages)
+        print(f"\nMessages token count: {msg_token_count}")
+
+    except Exception as e:
+        print(f"Error counting tokens: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def demo_cost_estimation():
+    """Demonstrate cost estimation."""
+    print("\n=== Google Cost Estimation Demo ===")
+
+    provider = GoogleProvider(model="gemini-2.5-flash")
+
+    messages = [Message.user("Write a brief summary of machine learning.")]
+
+    try:
+        response = provider.invoke(messages)
+
+        print(f"Response: {response.content[:100]}...")
+        print("\nUsage:")
+        print(f"  Prompt tokens: {response.usage.prompt_tokens}")
+        print(f"  Completion tokens: {response.usage.completion_tokens}")
+        print(f"  Total tokens: {response.usage.total_tokens}")
+
+        if response.cost:
+            print("\nCost Breakdown:")
+            print(f"  Input cost: ${response.cost.input_cost:.6f}")
+            print(f"  Output cost: ${response.cost.output_cost:.6f}")
+            print(f"  Total cost: {response.cost.format()}")
+
+    except Exception as e:
+        print(f"Error during cost estimation demo: {e}")
+        print("Make sure GEMINI_API_KEY or GOOGLE_API_KEY is set.")
+
+
+def main():
+    """Run all Google provider demos."""
+    print("Kader Google Provider Examples")
+    print("=" * 40)
+
+    print("\nAPI Key Setup:")
+    print("  Add your GEMINI_API_KEY to ~/.kader/.env:")
+    print("  GEMINI_API_KEY='your-api-key-here'")
+    print("\n  Get your API key from: https://aistudio.google.com/apikey")
+
+    demo_basic_invocation()
+    demo_streaming()
+    demo_async_invocation()
+    demo_async_streaming()
+    demo_configuration()
+    demo_conversation_history()
+    demo_list_models()
+    demo_model_info()
+    demo_token_counting()
+    demo_cost_estimation()
+
+    print("\n[OK] All Google demos completed!")
+
+
+if __name__ == "__main__":
+    main()
```
**{kader-1.0.0 → kader-1.1.0}/kader/agent/base.py**

```diff
@@ -23,7 +23,9 @@ from kader.providers.base import (
     Message,
     ModelConfig,
     StreamChunk,
+    Usage,
 )
+from kader.providers.google import GoogleProvider
 from kader.providers.ollama import OllamaProvider
 from kader.tools import BaseTool, ToolRegistry
 
@@ -222,6 +224,8 @@ class BaseAgent:
         provider_type = "openai"
         if isinstance(self.provider, OllamaProvider):
             provider_type = "ollama"
+        elif isinstance(self.provider, GoogleProvider):
+            provider_type = "google"
 
         base_config = ModelConfig(
             temperature=base_config.temperature,
@@ -624,7 +628,12 @@ class BaseAgent:
         )
 
         # estimate the cost...
-        …
+        usage_obj = Usage(
+            prompt_tokens=token_usage["prompt_tokens"],
+            completion_tokens=token_usage["completion_tokens"],
+            total_tokens=token_usage["total_tokens"],
+        )
+        estimated_cost = self.provider.estimate_cost(usage_obj)
 
         # Calculate and log cost
         agent_logger.calculate_cost(
@@ -796,7 +805,12 @@ class BaseAgent:
         )
 
         # estimate the cost...
-        …
+        usage_obj = Usage(
+            prompt_tokens=token_usage["prompt_tokens"],
+            completion_tokens=token_usage["completion_tokens"],
+            total_tokens=token_usage["total_tokens"],
+        )
+        estimated_cost = self.provider.estimate_cost(usage_obj)
 
         # Calculate and log cost
         agent_logger.calculate_cost(
```
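Both hunks replace a previously elided cost line with an explicit `Usage` object handed to the provider. A minimal sketch of the pattern, assuming only the `Usage` and `estimate_cost` interfaces shown in the diff (the helper name and token counts here are hypothetical):

```python
from kader.providers.base import BaseLLMProvider, Usage

def estimate_from_counts(provider: BaseLLMProvider, token_usage: dict[str, int]):
    """Build a Usage object from raw token counts and let the provider price it."""
    usage_obj = Usage(
        prompt_tokens=token_usage["prompt_tokens"],
        completion_tokens=token_usage["completion_tokens"],
        total_tokens=token_usage["total_tokens"],
    )
    return provider.estimate_cost(usage_obj)
```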
**{kader-1.0.0 → kader-1.1.0}/kader/config.py**

```diff
@@ -69,13 +69,21 @@ def ensure_kader_directory():
 def ensure_env_file(kader_dir):
     """
     Ensure that the .env file exists in the .kader directory with the
-    required …
+    required API key configurations.
     """
     env_file = kader_dir / ".env"
 
     # Create the .env file if it doesn't exist
     if not env_file.exists():
-        …
+        default_env_content = """# Kader Configuration
+# Ollama API Key (for local Ollama models)
+OLLAMA_API_KEY=''
+
+# Google Gemini API Key (for Google Gemini models)
+# Get your API key from: https://aistudio.google.com/apikey
+GEMINI_API_KEY=''
+"""
+        env_file.write_text(default_env_content, encoding="utf-8")
 
     # Set appropriate permissions for the .env file on Unix-like systems
     if not sys.platform.startswith("win"):
```