litellm-adk 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,15 @@
+from .agents import LiteLLMAgent
+from .tools import tool, tool_registry
+from .config.settings import settings
+from .memory import BaseMemory, InMemoryMemory, FileMemory, MongoDBMemory
+
+__all__ = [
+    "LiteLLMAgent",
+    "tool",
+    "tool_registry",
+    "settings",
+    "BaseMemory",
+    "InMemoryMemory",
+    "FileMemory",
+    "MongoDBMemory"
+]
@@ -0,0 +1,3 @@
+from ..core.agent import LiteLLMAgent
+
+__all__ = ["LiteLLMAgent"]
@@ -0,0 +1,32 @@
+from typing import Optional
+from pydantic_settings import BaseSettings, SettingsConfigDict
+from pydantic import Field
+
+class Settings(BaseSettings):
+    """
+    Application settings, loaded from environment variables or a .env file.
+    """
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_file_encoding="utf-8",
+        extra="ignore",
+        env_prefix="ADK_"  # Support ADK_-prefixed env vars
+    )
+
+    # Core LLM Settings
+    model: str = Field(default="gpt-4o", description="The model to use.")
+    api_key: Optional[str] = Field(default=None, description="Global API key.")
+    base_url: Optional[str] = Field(default=None, description="Global base URL.")
+
+    # Provider-specific keys (fallback)
+    openai_api_key: Optional[str] = None
+    anthropic_api_key: Optional[str] = None
+    cohere_api_key: Optional[str] = None
+
+    # Logging
+    log_level: str = Field(default="INFO", description="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
+
+    # Agent Defaults
+    sequential_execution: bool = Field(default=True, description="Default sequential tool execution mode.")
+
+settings = Settings()
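Since `Settings` uses `env_prefix="ADK_"` and reads a `.env` file, every field above can be supplied from the environment. A minimal sketch of overriding the defaults, assuming the package is installed as laid out in this wheel; the specific values are arbitrary examples, not project defaults:

```python
# Sketch: overriding ADK settings via environment variables (example values only).
import os

# The global `settings` object is constructed at import time, so set env vars first.
os.environ["ADK_MODEL"] = "gpt-4o-mini"
os.environ["ADK_LOG_LEVEL"] = "DEBUG"

from litellm_adk import settings

print(settings.model)      # -> "gpt-4o-mini"
print(settings.log_level)  # -> "DEBUG"
```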
@@ -0,0 +1,496 @@
+import litellm
+import json
+from typing import List, Dict, Any, Optional, Union, Generator, AsyncGenerator
+from .base import BaseAgent
+from ..observability.logger import adk_logger
+from ..config.settings import settings
+from ..tools.registry import tool_registry
+from ..memory.base import BaseMemory
+from ..memory.in_memory import InMemoryMemory
+import uuid
+
+# Global LiteLLM configuration for resilience
+litellm.drop_params = True
+
+class LiteLLMAgent(BaseAgent):
+    """
+    Agent powered by LiteLLM, supporting dynamic overrides for base_url and api_key.
+    """
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        system_prompt: str = "You are a helpful assistant.",
+        tools: Optional[List[Dict[str, Any]]] = None,
+        memory: Optional[BaseMemory] = None,
+        session_id: Optional[str] = None,
+        **kwargs
+    ):
+        self.model = model or settings.model
+        self.api_key = api_key or settings.api_key
+        self.base_url = base_url or settings.base_url
+
+        # Automatically prepend 'openai/' if a base_url is used to force proxy/OpenAI-compatible routing
+        if self.base_url and not self.model.startswith("openai/"):
+            adk_logger.debug(f"Custom base_url detected. Prepending 'openai/' to model {self.model}")
+            self.model = f"openai/{self.model}"
+
+        self.system_prompt = system_prompt
+
+        # Smart tool resolution
+        if tools is None:
+            # Default to all registered tools if none provided
+            self.tools = tool_registry.get_tool_definitions()
+        else:
+            # Process the provided list (can contain definitions OR functions)
+            processed_tools = []
+            for t in tools:
+                if callable(t):
+                    # It's a function: register it (if not already) and get its definition
+                    processed_tools.append(tool_registry._register_function(t))
+                else:
+                    # It's already a definition dict
+                    processed_tools.append(t)
+            self.tools = processed_tools
+
+        self.extra_kwargs = kwargs
+
+        # Default parallel_tool_calls if not explicitly provided
+        if "parallel_tool_calls" not in self.extra_kwargs:
+            self.extra_kwargs["parallel_tool_calls"] = False
+
+        self.sequential_tool_execution = kwargs.get("sequential_tool_execution", settings.sequential_execution)
+
+        # Memory persistence
+        self.memory = memory or InMemoryMemory()
+        self.session_id = session_id or str(uuid.uuid4())
+        self.history = self.memory.get_messages(self.session_id)
+
+        if not self.history:
+            self.history = [{"role": "system", "content": self.system_prompt}]
+            self.memory.add_message(self.session_id, self.history[0])
+
+        adk_logger.debug(f"Initialized LiteLLMAgent with session_id={self.session_id}, model={self.model}")
+
+    def _prepare_messages(self, prompt: str) -> List[Dict[str, str]]:
+        # Refresh from memory in case it was modified elsewhere
+        self.history = self.memory.get_messages(self.session_id)
+
+        messages = self.history.copy()
+        user_msg = {"role": "user", "content": prompt}
+        messages.append(user_msg)
+
+        # Persist the user message immediately
+        self.memory.add_message(self.session_id, user_msg)
+        self.history.append(user_msg)
+
+        return messages
+
+    def _update_history(self, final_messages: List[Dict[str, Any]]):
+        """Sync internal history and memory with the final message state."""
+        # Find which messages were added since we prepared (the user message was already added).
+        # We assume message order is preserved.
+        start_idx = len(self.history)
+        new_messages = [self._sanitize_message(m) for m in final_messages[start_idx:]]
+
+        if new_messages:
+            self.memory.add_messages(self.session_id, new_messages)
+            self.history.extend(new_messages)
+
+    def _sanitize_message(self, message: Any) -> Dict[str, Any]:
+        """Convert LiteLLM message objects to plain dictionaries for serialization."""
+        if isinstance(message, dict):
+            # Still need to sanitize tool_calls inside if they are objects
+            if "tool_calls" in message and message["tool_calls"]:
+                message["tool_calls"] = [self._sanitize_tool_call(tc) for tc in message["tool_calls"]]
+            return message
+
+        # Manually extract common fields to ensure clean JSON
+        msg_dict = {
+            "role": getattr(message, "role", "assistant"),
+            "content": getattr(message, "content", None)
+        }
+
+        if hasattr(message, "name") and message.name:
+            msg_dict["name"] = message.name
+
+        if hasattr(message, "tool_calls") and message.tool_calls:
+            msg_dict["tool_calls"] = [self._sanitize_tool_call(tc) for tc in message.tool_calls]
+
+        if hasattr(message, "tool_call_id") and message.tool_call_id:
+            msg_dict["tool_call_id"] = message.tool_call_id
+
+        return msg_dict
+
+    def _sanitize_tool_call(self, tc: Any) -> Dict[str, Any]:
+        """Convert a tool call object to a standard dictionary."""
+        if isinstance(tc, dict):
+            return tc
+
+        tc_dict = {
+            "id": getattr(tc, "id", None),
+            "type": getattr(tc, "type", "function"),
+            "function": {
+                "name": None,
+                "arguments": ""
+            }
+        }
+
+        func = getattr(tc, "function", None)
+        if func:
+            tc_dict["function"]["name"] = getattr(func, "name", None)
+            tc_dict["function"]["arguments"] = getattr(func, "arguments", "")
+
+        return tc_dict
+
+    def _should_handle_sequentially(self) -> bool:
+        """Determines whether tool calls should be processed one at a time."""
+        return self.sequential_tool_execution
+
+    async def _aexecute_tool(self, tool_call) -> Any:
+        # Same as _execute_tool, kept async for future use
+        return self._execute_tool(tool_call)
+
+    def _get_tc_val(self, tool_call, attr, subattr=None):
+        """Helper to get a value from either an object or a dict tool call."""
+        if isinstance(tool_call, dict):
+            val = tool_call.get(attr)
+            if val and subattr:
+                return val.get(subattr)
+            return val
+        else:
+            val = getattr(tool_call, attr, None)
+            if val and subattr:
+                return getattr(val, subattr, None)
+            return val
+
+    def _execute_tool(self, tool_call) -> Any:
+        """Helper to execute a tool call and handle JSON parsing."""
+        function_name = self._get_tc_val(tool_call, "function", "name")
+        raw_args = self._get_tc_val(tool_call, "function", "arguments") or "{}"
+
+        try:
+            if isinstance(raw_args, dict):
+                arguments = raw_args
+            else:
+                # Try standard parsing
+                arguments = json.loads(raw_args)
+        except json.JSONDecodeError:
+            # RECOVERY: handle concatenated JSON objects like {"a":1}{"b":2}
+            if isinstance(raw_args, str) and "}{" in raw_args:
+                try:
+                    # Take only the first valid JSON object
+                    decoder = json.JSONDecoder()
+                    arguments, _ = decoder.raw_decode(raw_args)
+                except Exception:
+                    adk_logger.error(f"Failed to recover tool arguments: {raw_args}")
+                    arguments = {}
+            else:
+                adk_logger.warning(f"Failed to parse tool arguments for {function_name}: {raw_args}")
+                arguments = {}
+
+        return tool_registry.execute(function_name, **arguments)
+
+    def invoke(self, prompt: str, tools: Optional[List[Dict[str, Any]]] = None, **kwargs) -> str:
+        """
+        Execute a synchronous completion with automatic tool calling.
+        """
+        messages = self._prepare_messages(prompt)
+        tools = tools or self.tools
+
+        adk_logger.info(f"Invoking completion for model: {self.model}")
+
+        while True:
+            response = litellm.completion(
+                model=self.model,
+                messages=messages,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                tools=tools,
+                **{**self.extra_kwargs, **kwargs}
+            )
+
+            message = response.choices[0].message
+
+            # Check whether the model wants to call tools
+            if hasattr(message, "tool_calls") and message.tool_calls:
+                # If sequential execution is enabled, only process the FIRST tool call
+                tool_calls_to_process = [message.tool_calls[0]] if self._should_handle_sequentially() else message.tool_calls
+
+                # Update the original message to only include the calls we are handling
+                # (to keep history clean for strict models)
+                if self._should_handle_sequentially():
+                    message.tool_calls = tool_calls_to_process
+
+                sanitized_msg = self._sanitize_message(message)
+                messages.append(sanitized_msg)
+
+                for tool_call in tool_calls_to_process:
+                    result = self._execute_tool(tool_call)
+
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "name": tool_call.function.name,
+                        "content": str(result)
+                    })
+
+                continue
+
+            messages.append(self._sanitize_message(message))
+            self._update_history(messages)
+            return message.content
+
+    async def ainvoke(self, prompt: str, tools: Optional[List[Dict[str, Any]]] = None, **kwargs) -> str:
+        """
+        Execute an asynchronous completion with automatic tool calling.
+        """
+        messages = self._prepare_messages(prompt)
+        tools = tools or self.tools
+
+        adk_logger.info(f"Invoking async completion for model: {self.model}")
+
+        while True:
+            response = await litellm.acompletion(
+                model=self.model,
+                messages=messages,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                tools=tools,
+                **{**self.extra_kwargs, **kwargs}
+            )
+
+            message = response.choices[0].message
+
+            if hasattr(message, "tool_calls") and message.tool_calls:
+                tool_calls_to_process = [message.tool_calls[0]] if self._should_handle_sequentially() else message.tool_calls
+
+                if self._should_handle_sequentially():
+                    message.tool_calls = tool_calls_to_process
+
+                sanitized_msg = self._sanitize_message(message)
+                messages.append(sanitized_msg)
+
+                for tool_call in tool_calls_to_process:
+                    result = self._execute_tool(tool_call)
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "name": tool_call.function.name,
+                        "content": str(result)
+                    })
+                continue
+
+            messages.append(self._sanitize_message(message))
+            self._update_history(messages)
+            return message.content
+
+    def stream(self, prompt: str, tools: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Generator[str, None, None]:
+        """
+        Execute a streaming completion with automatic tool calling.
+        """
+        messages = self._prepare_messages(prompt)
+        tools = tools or self.tools
+
+        while True:
+            response = litellm.completion(
+                model=self.model,
+                messages=messages,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                stream=True,
+                tools=tools,
+                **{**self.extra_kwargs, **kwargs}
+            )
+
+            # Accumulate tool call parts
+            full_content = ""
+            tool_calls_by_index = {}  # map of index -> list of SimpleNamespace
+
+            for chunk in response:
+                delta = chunk.choices[0].delta
+                if delta.content:
+                    full_content += delta.content
+                    yield delta.content
+
+                if hasattr(delta, "tool_calls") and delta.tool_calls:
+                    for tc_delta in delta.tool_calls:
+                        idx = tc_delta.index
+                        if idx not in tool_calls_by_index:
+                            tool_calls_by_index[idx] = []
+
+                        last_tc = tool_calls_by_index[idx][-1] if tool_calls_by_index[idx] else None
+
+                        # Decide whether we need a new tool call object for this index
+                        start_new = False
+                        if last_tc is None:
+                            start_new = True
+                        else:
+                            # Start new if a name is present and the last one already has a name
+                            if tc_delta.function and tc_delta.function.name and last_tc.function.name:
+                                start_new = True
+                            # Start new if an ID is present and the last one already has a different ID
+                            elif tc_delta.id and last_tc.id and tc_delta.id != last_tc.id:
+                                start_new = True
+
+                        if start_new:
+                            from types import SimpleNamespace
+                            new_tc = SimpleNamespace(
+                                id=tc_delta.id,
+                                function=SimpleNamespace(
+                                    name=tc_delta.function.name if tc_delta.function else None,
+                                    arguments=tc_delta.function.arguments if tc_delta.function else ""
+                                )
+                            )
+                            tool_calls_by_index[idx].append(new_tc)
+                        else:
+                            # Update the existing tool call
+                            if tc_delta.id:
+                                last_tc.id = tc_delta.id
+                            if tc_delta.function:
+                                if tc_delta.function.name:
+                                    last_tc.function.name = (last_tc.function.name or "") + tc_delta.function.name
+                                if tc_delta.function.arguments:
+                                    if last_tc.function.arguments is None:
+                                        last_tc.function.arguments = ""
+                                    last_tc.function.arguments += tc_delta.function.arguments
+
+            # Build the final flattened tool calls list (as dicts for history)
+            tool_calls = []
+            for idx in sorted(tool_calls_by_index.keys()):
+                for tc_obj in tool_calls_by_index[idx]:
+                    if tc_obj.function.name:
+                        tool_calls.append({
+                            "id": tc_obj.id,
+                            "type": "function",
+                            "function": {
+                                "name": tc_obj.function.name,
+                                "arguments": tc_obj.function.arguments
+                            }
+                        })
+
+            if tool_calls:
+                # If sequential, only keep the first tool call
+                if self._should_handle_sequentially():
+                    tool_calls = [tool_calls[0]]
+
+                # Add the assistant's composite tool call message to history
+                assistant_msg = {"role": "assistant", "tool_calls": tool_calls, "content": full_content or None}
+                messages.append(assistant_msg)
+
+                for tool_call in tool_calls:
+                    result = self._execute_tool(tool_call)
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call["id"],
+                        "name": tool_call["function"]["name"],
+                        "content": str(result)
+                    })
+
+                # Loop back to continue the conversation with tool results
+                continue
+
+            # No tool calls: store the final content and finish
+            messages.append({"role": "assistant", "content": full_content})
+            self._update_history(messages)
+            return
+
+    async def astream(self, prompt: str, tools: Optional[List[Dict[str, Any]]] = None, **kwargs) -> AsyncGenerator[str, None]:
+        """
+        Execute an asynchronous streaming completion with automatic tool calling.
+        """
+        messages = self._prepare_messages(prompt)
+        tools = tools or self.tools
+
+        while True:
+            response = await litellm.acompletion(
+                model=self.model,
+                messages=messages,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                stream=True,
+                tools=tools,
+                **{**self.extra_kwargs, **kwargs}
+            )
+
+            full_content = ""
+            tool_calls_by_index = {}
+
+            async for chunk in response:
+                delta = chunk.choices[0].delta
+                if delta.content:
+                    full_content += delta.content
+                    yield delta.content
+
+                if hasattr(delta, "tool_calls") and delta.tool_calls:
+                    for tc_delta in delta.tool_calls:
+                        idx = tc_delta.index
+                        if idx not in tool_calls_by_index:
+                            tool_calls_by_index[idx] = []
+
+                        last_tc = tool_calls_by_index[idx][-1] if tool_calls_by_index[idx] else None
+                        start_new = False
+                        if last_tc is None:
+                            start_new = True
+                        else:
+                            if tc_delta.function and tc_delta.function.name and last_tc.function.name:
+                                start_new = True
+                            elif tc_delta.id and last_tc.id and tc_delta.id != last_tc.id:
+                                start_new = True
+
+                        if start_new:
+                            from types import SimpleNamespace
+                            new_tc = SimpleNamespace(
+                                id=tc_delta.id,
+                                function=SimpleNamespace(
+                                    name=tc_delta.function.name if tc_delta.function else None,
+                                    arguments=tc_delta.function.arguments if tc_delta.function else ""
+                                )
+                            )
+                            tool_calls_by_index[idx].append(new_tc)
+                        else:
+                            if tc_delta.id:
+                                last_tc.id = tc_delta.id
+                            if tc_delta.function:
+                                if tc_delta.function.name:
+                                    last_tc.function.name = (last_tc.function.name or "") + tc_delta.function.name
+                                if tc_delta.function.arguments:
+                                    if last_tc.function.arguments is None:
+                                        last_tc.function.arguments = ""
+                                    last_tc.function.arguments += tc_delta.function.arguments
+
+            tool_calls = []
+            for idx in sorted(tool_calls_by_index.keys()):
+                for tc_obj in tool_calls_by_index[idx]:
+                    if tc_obj.function.name:
+                        tool_calls.append({
+                            "id": tc_obj.id,
+                            "type": "function",
+                            "function": {
+                                "name": tc_obj.function.name,
+                                "arguments": tc_obj.function.arguments
+                            }
+                        })
+
+            if tool_calls:
+                if self._should_handle_sequentially():
+                    tool_calls = [tool_calls[0]]
+
+                assistant_msg = {"role": "assistant", "tool_calls": tool_calls, "content": full_content or None}
+                messages.append(assistant_msg)
+
+                for tool_call in tool_calls:
+                    result = self._execute_tool(tool_call)
+                    messages.append({
+                        "role": "tool",
+                        "tool_call_id": tool_call["id"],
+                        "name": tool_call["function"]["name"],
+                        "content": str(result)
+                    })
+                continue
+
+            messages.append({"role": "assistant", "content": full_content})
+            self._update_history(messages)
+            return
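To illustrate the `base_url` handling in `__init__` above, here is a minimal usage sketch. The proxy URL, model name, and key are placeholders for any OpenAI-compatible endpoint, not values shipped with the package:

```python
# Sketch: pointing the agent at an OpenAI-compatible proxy (placeholder values).
from litellm_adk import LiteLLMAgent

agent = LiteLLMAgent(
    model="llama-3.1-8b-instruct",        # hypothetical model served by the proxy
    base_url="http://localhost:8000/v1",  # hypothetical OpenAI-compatible endpoint
    api_key="sk-local-placeholder",
)

# Because base_url is set, the agent prepends "openai/" so LiteLLM routes the
# request through its OpenAI-compatible path.
print(agent.model)  # -> "openai/llama-3.1-8b-instruct"

print(agent.invoke("Reply with a single short sentence."))
```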
@@ -0,0 +1,14 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Any, Optional, Union, Generator, AsyncGenerator
+
+class BaseAgent(ABC):
+    """
+    Abstract Base Class for all agents.
+    """
+    @abstractmethod
+    def invoke(self, prompt: str, **kwargs) -> Any:
+        pass
+
+    @abstractmethod
+    async def ainvoke(self, prompt: str, **kwargs) -> Any:
+        pass
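The contract is small: an agent only has to provide `invoke` and `ainvoke`. As a purely hypothetical illustration (not part of the package; the shipped implementation is the `LiteLLMAgent` above), a trivial subclass could look like this, assuming the module layout of this wheel:

```python
# Hypothetical example of the BaseAgent contract; not included in litellm-adk.
from typing import Any

from litellm_adk.core.base import BaseAgent


class EchoAgent(BaseAgent):
    """Toy agent that simply returns the prompt unchanged."""

    def invoke(self, prompt: str, **kwargs) -> Any:
        return prompt

    async def ainvoke(self, prompt: str, **kwargs) -> Any:
        return prompt
```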
@@ -0,0 +1,6 @@
+from .base import BaseMemory
+from .in_memory import InMemoryMemory
+from .file import FileMemory
+from .mongodb import MongoDBMemory
+
+__all__ = ["BaseMemory", "InMemoryMemory", "FileMemory", "MongoDBMemory"]
@@ -0,0 +1,27 @@
+from abc import ABC, abstractmethod
+from typing import List, Dict, Any, Optional
+
+class BaseMemory(ABC):
+    """
+    Abstract Base Class for memory persistence.
+    """
+
+    @abstractmethod
+    def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
+        """Retrieve all messages for a given session."""
+        pass
+
+    @abstractmethod
+    def add_message(self, session_id: str, message: Dict[str, Any]):
+        """Add a single message to a session."""
+        pass
+
+    @abstractmethod
+    def add_messages(self, session_id: str, messages: List[Dict[str, Any]]):
+        """Add multiple messages to a session."""
+        pass
+
+    @abstractmethod
+    def clear(self, session_id: str):
+        """Clear history for a session."""
+        pass
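Because the agent only talks to this interface, a new backend is a matter of implementing these four methods. As a hedged sketch (not shipped with the package; the built-in file, in-memory, and MongoDB backends follow below), an SQLite-backed store might look like this:

```python
# Hypothetical SQLite-backed memory sketched against the BaseMemory interface;
# not part of litellm-adk.
import json
import sqlite3
from typing import Any, Dict, List

from litellm_adk.memory import BaseMemory


class SQLiteMemory(BaseMemory):
    def __init__(self, db_path: str = "conversations.db"):
        self.conn = sqlite3.connect(db_path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS messages ("
            "session_id TEXT, idx INTEGER, payload TEXT)"
        )

    def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
        rows = self.conn.execute(
            "SELECT payload FROM messages WHERE session_id = ? ORDER BY idx",
            (session_id,),
        ).fetchall()
        return [json.loads(row[0]) for row in rows]

    def add_message(self, session_id: str, message: Dict[str, Any]):
        # Append the message at the next index for this session.
        count = self.conn.execute(
            "SELECT COUNT(*) FROM messages WHERE session_id = ?", (session_id,)
        ).fetchone()[0]
        self.conn.execute(
            "INSERT INTO messages VALUES (?, ?, ?)",
            (session_id, count, json.dumps(message)),
        )
        self.conn.commit()

    def add_messages(self, session_id: str, messages: List[Dict[str, Any]]):
        for message in messages:
            self.add_message(session_id, message)

    def clear(self, session_id: str):
        self.conn.execute("DELETE FROM messages WHERE session_id = ?", (session_id,))
        self.conn.commit()
```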
@@ -0,0 +1,47 @@
+import json
+import os
+from typing import List, Dict, Any, Optional
+from .base import BaseMemory
+
+class FileMemory(BaseMemory):
+    """
+    JSON file-based persistence for conversation history.
+    """
+    def __init__(self, file_path: str = "conversations.json"):
+        self.file_path = file_path
+        self._cache: Dict[str, List[Dict[str, Any]]] = {}
+        self._load()
+
+    def _load(self):
+        if os.path.exists(self.file_path):
+            with open(self.file_path, "r", encoding="utf-8") as f:
+                try:
+                    self._cache = json.load(f)
+                except json.JSONDecodeError:
+                    self._cache = {}
+        else:
+            self._cache = {}
+
+    def _save(self):
+        with open(self.file_path, "w", encoding="utf-8") as f:
+            json.dump(self._cache, f, indent=2, ensure_ascii=False)
+
+    def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
+        return self._cache.get(session_id, []).copy()
+
+    def add_message(self, session_id: str, message: Dict[str, Any]):
+        if session_id not in self._cache:
+            self._cache[session_id] = []
+        self._cache[session_id].append(message)
+        self._save()
+
+    def add_messages(self, session_id: str, messages: List[Dict[str, Any]]):
+        if session_id not in self._cache:
+            self._cache[session_id] = []
+        self._cache[session_id].extend(messages)
+        self._save()
+
+    def clear(self, session_id: str):
+        if session_id in self._cache:
+            self._cache[session_id] = []
+        self._save()
@@ -0,0 +1,26 @@
+from typing import List, Dict, Any
+from .base import BaseMemory
+
+class InMemoryMemory(BaseMemory):
+    """
+    Standard in-memory store for conversation history.
+    """
+    def __init__(self):
+        self._storage: Dict[str, List[Dict[str, Any]]] = {}
+
+    def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
+        return self._storage.get(session_id, []).copy()
+
+    def add_message(self, session_id: str, message: Dict[str, Any]):
+        if session_id not in self._storage:
+            self._storage[session_id] = []
+        self._storage[session_id].append(message)
+
+    def add_messages(self, session_id: str, messages: List[Dict[str, Any]]):
+        if session_id not in self._storage:
+            self._storage[session_id] = []
+        self._storage[session_id].extend(messages)
+
+    def clear(self, session_id: str):
+        if session_id in self._storage:
+            self._storage[session_id] = []
@@ -0,0 +1,47 @@
+from typing import List, Dict, Any, Optional
+from .base import BaseMemory
+import pymongo
+
+class MongoDBMemory(BaseMemory):
+    """
+    MongoDB-based persistence for conversation history.
+    """
+    def __init__(
+        self,
+        connection_string: str = "mongodb://localhost:27017/",
+        database_name: str = "litellm_adk",
+        collection_name: str = "conversations"
+    ):
+        self.client = pymongo.MongoClient(connection_string)
+        self.db = self.client[database_name]
+        self.collection = self.db[collection_name]
+        # Create an index on session_id for faster lookups
+        self.collection.create_index("session_id", unique=True)
+
+    def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
+        doc = self.collection.find_one({"session_id": session_id})
+        if doc:
+            # MongoDB returns a list of dicts, but we return a copy
+            # to prevent in-place modifications
+            return list(doc.get("messages", []))
+        return []
+
+    def add_message(self, session_id: str, message: Dict[str, Any]):
+        self.collection.update_one(
+            {"session_id": session_id},
+            {"$push": {"messages": message}},
+            upsert=True
+        )
+
+    def add_messages(self, session_id: str, messages: List[Dict[str, Any]]):
+        self.collection.update_one(
+            {"session_id": session_id},
+            {"$push": {"messages": {"$each": messages}}},
+            upsert=True
+        )
+
+    def clear(self, session_id: str):
+        self.collection.update_one(
+            {"session_id": session_id},
+            {"$set": {"messages": []}}
+        )
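A brief usage sketch, assuming a MongoDB server is reachable at the default localhost URI and model credentials are configured via the `ADK_` environment variables; the database, collection, and session id below are just illustrative:

```python
# Sketch: persisting agent conversations in MongoDB (assumes a local mongod).
from litellm_adk import LiteLLMAgent, MongoDBMemory

memory = MongoDBMemory(
    connection_string="mongodb://localhost:27017/",
    database_name="litellm_adk",
    collection_name="conversations",
)

agent = LiteLLMAgent(memory=memory, session_id="support-ticket-42")
agent.invoke("Remember that my order number is 1234.")
# Any later process that reuses session_id="support-ticket-42" resumes this history.
```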
@@ -0,0 +1,19 @@
+import sys
+from loguru import logger
+from ..config.settings import settings
+
+def setup_logger():
+    """
+    Configures the loguru logger based on application settings.
+    """
+    logger.remove()  # Remove default handler
+    logger.add(
+        sys.stderr,
+        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
+        level=settings.log_level
+    )
+    return logger
+
+# Initialize the global logger
+adk_logger = setup_logger()
+adk_logger.info("LiteLLM ADK Logger Initialized")
@@ -0,0 +1,3 @@
+from .registry import tool, tool_registry
+
+__all__ = ["tool", "tool_registry"]
@@ -0,0 +1,86 @@
+import inspect
+import json
+from typing import Any, Callable, Dict, List, Optional, Type
+from pydantic import BaseModel, create_model
+from ..observability.logger import adk_logger
+
+class ToolRegistry:
+    """
+    Registry for managing tools that can be called by agents.
+    """
+    def __init__(self):
+        self._tools: Dict[str, Dict[str, Any]] = {}
+
+    def register(self, name_or_func: Any = None):
+        """
+        Decorator to register a function as a tool.
+        Supports both @tool and @tool("name").
+        """
+        if callable(name_or_func):
+            self._register_function(name_or_func)
+            return name_or_func
+
+        def decorator(func: Callable):
+            self._register_function(func, name_or_func)
+            return func
+        return decorator
+
+    def _register_function(self, func: Callable, name: Optional[str] = None) -> Dict[str, Any]:
+        """Internal helper to register a function and return its definition."""
+        tool_name = name or func.__name__
+        description = func.__doc__ or f"Tool: {tool_name}"
+
+        sig = inspect.signature(func)
+        parameters = {}
+        for param_name, param in sig.parameters.items():
+            if param_name == "self": continue
+            param_type = "string"
+            if param.annotation == int: param_type = "integer"
+            elif param.annotation == float: param_type = "number"
+            elif param.annotation == bool: param_type = "boolean"
+
+            parameters[param_name] = {
+                "type": param_type,
+                "description": f"Parameter {param_name}"
+            }
+
+        definition = {
+            "type": "function",
+            "function": {
+                "name": tool_name,
+                "description": description.strip(),
+                "parameters": {
+                    "type": "object",
+                    "properties": parameters,
+                    "required": [p.name for p in sig.parameters.values() if p.default == inspect.Parameter.empty and p.name != "self"]
+                }
+            }
+        }
+
+        self._tools[tool_name] = {
+            "name": tool_name,
+            "func": func,
+            "definition": definition
+        }
+        adk_logger.debug(f"Registered tool: {tool_name}")
+        return definition
+
+    def get_tool_definitions(self) -> List[Dict[str, Any]]:
+        """
+        Returns the list of tool definitions in OpenAI format.
+        """
+        return [t["definition"] for t in self._tools.values()]
+
+    def execute(self, name: str, **kwargs) -> Any:
+        """
+        Executes a registered tool by name with keyword arguments.
+        """
+        if name not in self._tools:
+            raise ValueError(f"Tool '{name}' not found in registry.")
+
+        adk_logger.info(f"Executing tool: {name} with args: {kwargs}")
+        return self._tools[name]["func"](**kwargs)
+
+# Global tool registry
+tool_registry = ToolRegistry()
+tool = tool_registry.register
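The registry accepts both the bare decorator and a positional name, and it derives an OpenAI-style function schema from the signature. A small sketch of both forms, with hypothetical tool names:

```python
# Sketch: registering tools and inspecting the generated definitions
# (the tool names "add" and "shout" are arbitrary examples).
import json

from litellm_adk.tools import tool, tool_registry


@tool
def add(a: int, b: int):
    """Add two integers."""
    return a + b


@tool("shout")
def make_loud(text: str):
    """Upper-case a string."""
    return text.upper()


# OpenAI-format schemas derived from the function signatures above.
print(json.dumps(tool_registry.get_tool_definitions(), indent=2))

print(tool_registry.execute("add", a=2, b=3))     # -> 5
print(tool_registry.execute("shout", text="hi"))  # -> "HI"
```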
@@ -0,0 +1,98 @@
+Metadata-Version: 2.4
+Name: litellm-adk
+Version: 0.1.2
+Summary: Production-grade Agent Development Kit powered by LiteLLM
+License-Expression: MIT
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: litellm>=1.20.0
+Requires-Dist: pydantic>=2.0.0
+Requires-Dist: pydantic-settings>=2.0.0
+Requires-Dist: loguru>=0.7.0
+Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: pymongo>=4.0.0
+Provides-Extra: dev
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-asyncio; extra == "dev"
+Requires-Dist: black; extra == "dev"
+Requires-Dist: isort; extra == "dev"
+Requires-Dist: mypy; extra == "dev"
+Dynamic: license-file
+
+# LiteLLM ADK (Agent Development Kit)
+
+A production-grade, highly flexible Agent Development Kit powered by [LiteLLM](https://github.com/BerriAI/litellm).
+
+Built for developers who need to swap models, API keys, and base URLs dynamically while maintaining a robust structure for tool usage, **modular memory persistence**, and observability.
+
+## Features
+
+- **Model Agnostic**: Access 100+ LLMs (OpenAI, Anthropic, OCI Grok-3, Llama, etc.) via LiteLLM.
+- **Easy Tools**: Register Python functions with the `@tool` decorator. No manual JSON schema management.
+- **Modular Memory**: Native support for conversation persistence:
+  - `InMemoryMemory`: Fast, ephemeral storage.
+  - `FileMemory`: Simple JSON-based local persistence.
+  - `MongoDBMemory`: Scalable, remote persistence.
+- **Parallel & Sequential Execution**: Built-in support for parallel tool calls with robust stream accumulation.
+- **Dynamic Configuration**: Global defaults via `.env` or per-agent/per-request overrides.
+- **Async & Streaming**: Native support for `ainvoke`, `stream`, and `astream`.
+
+## Installation
+
+```bash
+pip install litellm-adk
+```
+
+## Quick Start
+
+### Simple Conversational Agent
+
+```python
+from litellm_adk.agents import LiteLLMAgent
+from litellm_adk.memory import FileMemory
+
+# Set up persistent memory
+memory = FileMemory("chat_history.json")
+
+agent = LiteLLMAgent(
+    model="gpt-4",
+    memory=memory,
+    session_id="user-123"
+)
+
+response = agent.invoke("My name is Alice.")
+print(agent.invoke("What is my name?"))  # Alice
+```
+
+### Registering Tools
+
+```python
+from litellm_adk.tools import tool
+
+@tool
+def get_weather(location: str):
+    """Get the current weather for a location."""
+    return f"The weather in {location} is sunny."
+
+agent = LiteLLMAgent(tools=[get_weather])
+agent.invoke("What is the weather in London?")
+```
+
+## Configuration
+
+The ADK uses `pydantic-settings`. Configure via `.env`:
+
+- `ADK_MODEL`: Default model (e.g., `gpt-4o`).
+- `ADK_API_KEY`: Default API key.
+- `ADK_BASE_URL`: Global base URL override.
+- `ADK_LOG_LEVEL`: DEBUG, INFO, etc.
+
+## Documentation
+- [Example: Basic Tools](./examples/demo.py)
+- [Example: Persistent Memory](./examples/memory_demo.py)
+
+## License
+
+MIT
@@ -0,0 +1,18 @@
+litellm_adk/__init__.py,sha256=DfQsjG9D_5OvxX-8tOtGY3kuCxYb6Hn_I-2BvnTqf0M,368
+litellm_adk/agents/__init__.py,sha256=KsiCcyYyn0iJo0sZsd7n3nJ5ezVEqJqrrTP1b9ryG0M,69
+litellm_adk/config/settings.py,sha256=sdI4PrJKzIRJPm5vEBKdiecRcjrHNASsYbmtOiBnY_c,1182
+litellm_adk/core/agent.py,sha256=_5lgc1Q6jtE4kRkM_eFijOHdaK0REzJ5jgXrMtq1bFA,22255
+litellm_adk/core/base.py,sha256=ov2bZk_a15FFGsQSdKwHrQ1cvALdZM8ByK5hGvFWyL0,386
+litellm_adk/memory/__init__.py,sha256=ICPUbV0PsTHEQSm0S35_d1ToeyrgMVFs_hRokvRRJL4,212
+litellm_adk/memory/base.py,sha256=Bm33oPaLNOJdG0RJGc38g387GSMyi_ymQjOMlDexTyk,788
+litellm_adk/memory/file.py,sha256=C0pB1pWJ4HtjCn6ICe54pL3cVmVnPI6D5jRvQaffgE4,1623
+litellm_adk/memory/in_memory.py,sha256=AVMV7iqb-UvbPE-CZmRi14LkV-7hEqrkwtkrwlxvy_w,951
+litellm_adk/memory/mongodb.py,sha256=M7IQsgahT6ALSNTQ2AKjSUWGR7uuz-KielBYIu_oLVk,1657
+litellm_adk/observability/logger.py,sha256=PXr20D7gtDIrg6eZD8Hm1-tfAuTXyUVDUMD9-8Aw32E,619
+litellm_adk/tools/__init__.py,sha256=J-Rkx-psP5sZXgcy5h4mygvQd-tZUONKLYt4LSOiEV8,82
+litellm_adk/tools/registry.py,sha256=M_48BpN0XSea_3msjGSyyDDRWu9uBNDLDtLh9Vh5yp8,3089
+litellm_adk-0.1.2.dist-info/licenses/LICENSE,sha256=BfYjX2LxngGX9t6Dk1Y5ptJNAkKcQuGG-OAR9jsKUGM,1091
+litellm_adk-0.1.2.dist-info/METADATA,sha256=AoHLb725fdU6blWIbpXX7bvQkG2ePk5kAgHXxTD_qTA,3016
+litellm_adk-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+litellm_adk-0.1.2.dist-info/top_level.txt,sha256=30MPgkTEjMUe8z-jnjMM2vbtqdghK_isd_ufRQ1w2hM,12
+litellm_adk-0.1.2.dist-info/RECORD,,
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (80.10.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Aarumugapandi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
@@ -0,0 +1 @@
+litellm_adk