adelie-ai 0.3.1 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/adelie/__init__.py +1 -1
- package/adelie/config.py +1 -0
- package/adelie/llm_client.py +2 -1
- package/package.json +1 -1
package/adelie/__init__.py
CHANGED
package/adelie/config.py
CHANGED
|
@@ -25,6 +25,7 @@ GEMINI_MODEL: str = os.getenv("GEMINI_MODEL", "gemini-2.0-flash")
|
|
|
25
25
|
OLLAMA_BASE_URL: str = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
|
|
26
26
|
OLLAMA_MODEL: str = os.getenv("OLLAMA_MODEL", "llama3.2")
|
|
27
27
|
OLLAMA_API_KEY: str = os.getenv("OLLAMA_API_KEY", "") # For Ollama Cloud
|
|
28
|
+
LLM_TIMEOUT: int = int(os.getenv("LLM_TIMEOUT", "300")) # Request timeout in seconds
|
|
28
29
|
|
|
29
30
|
# ── Model Fallback ───────────────────────────────────────────────────────────
|
|
30
31
|
# Comma-separated fallback chain, e.g. "gemini:gemini-2.5-flash,gemini:gemini-2.0-flash,ollama:llama3.2"
|
package/adelie/llm_client.py
CHANGED
|
@@ -23,6 +23,7 @@ from adelie.config import (
|
|
|
23
23
|
GEMINI_API_KEY,
|
|
24
24
|
GEMINI_MODEL,
|
|
25
25
|
LLM_PROVIDER,
|
|
26
|
+
LLM_TIMEOUT,
|
|
26
27
|
OLLAMA_API_KEY,
|
|
27
28
|
OLLAMA_BASE_URL,
|
|
28
29
|
OLLAMA_MODEL,
|
|
@@ -362,7 +363,7 @@ def _generate_ollama_model(
|
|
|
362
363
|
headers["Authorization"] = f"Bearer {OLLAMA_API_KEY}"
|
|
363
364
|
|
|
364
365
|
try:
|
|
365
|
-
resp = requests.post(url, json=payload, headers=headers, timeout=300)
|
|
366
|
+
resp = requests.post(url, json=payload, headers=headers, timeout=LLM_TIMEOUT)
|
|
366
367
|
resp.raise_for_status()
|
|
367
368
|
except requests.ConnectionError:
|
|
368
369
|
raise ConnectionError(
|