cortexdb-py 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cortexdb_py-0.1.0/PKG-INFO +63 -0
- cortexdb_py-0.1.0/README.md +42 -0
- cortexdb_py-0.1.0/cortexdb/__init__.py +7 -0
- cortexdb_py-0.1.0/cortexdb/client.py +70 -0
- cortexdb_py-0.1.0/cortexdb/exceptions.py +51 -0
- cortexdb_py-0.1.0/cortexdb/ingest.py +54 -0
- cortexdb_py-0.1.0/cortexdb/llm/__init__.py +5 -0
- cortexdb_py-0.1.0/cortexdb/llm/provider.py +187 -0
- cortexdb_py-0.1.0/cortexdb/models.py +145 -0
- cortexdb_py-0.1.0/cortexdb/query.py +262 -0
- cortexdb_py-0.1.0/cortexdb/setup.py +54 -0
- cortexdb_py-0.1.0/pyproject.toml +27 -0
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cortexdb-py
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Python SDK for CortexDB — a RAG-powered memory database
|
|
5
|
+
Author: VectorNode
|
|
6
|
+
Requires-Python: >=3.9,<4.0
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
14
|
+
Requires-Dist: eval-type-backport (>=0.3.1,<0.4.0)
|
|
15
|
+
Requires-Dist: google-genai (>=1.0.0,<2.0.0)
|
|
16
|
+
Requires-Dist: httpx (>=0.28.0,<0.29.0)
|
|
17
|
+
Requires-Dist: pydantic (>=2.0,<3.0)
|
|
18
|
+
Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
|
|
21
|
+
# CortexDB Python SDK
|
|
22
|
+
|
|
23
|
+
A Python client for the CortexDB RAG backend, providing easy access to ingestion, querying, and LLM orchestration.
|
|
24
|
+
|
|
25
|
+
## Features
|
|
26
|
+
|
|
27
|
+
- **Setup API**: Configure LLM providers (Gemini, OpenAI, Azure).
|
|
28
|
+
- **Ingest API**: Ingest documents with automatic embedding generation.
|
|
29
|
+
- **Query API**: Perform semantic search, entity retrieval, and graph traversals.
|
|
30
|
+
- **Native LLM Integration**: Uses `google-genai` for native Gemini support.
|
|
31
|
+
|
|
32
|
+
## Installation
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
pip install -e .
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Usage
|
|
39
|
+
|
|
40
|
+
See `main.py` for a complete example of setting up a medical chatbot.
|
|
41
|
+
|
|
42
|
+
```python
|
|
43
|
+
from cortexdb import CortexDB
|
|
44
|
+
|
|
45
|
+
db = CortexDB("http://localhost:8080")
|
|
46
|
+
|
|
47
|
+
# Setup
|
|
48
|
+
db.setup.configure(provider="GEMINI", api_key="...", chat_model="gemini-2.0-flash", embed_model="gemini-embedding-001")
|
|
49
|
+
|
|
50
|
+
# Ingest
|
|
51
|
+
db.ingest.document(uid="user-1", converser="USER", content="Hello world")
|
|
52
|
+
|
|
53
|
+
# Query
|
|
54
|
+
results = db.query.search_contexts("Hello", limit=5)
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Testing
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
pip install pytest respx
|
|
61
|
+
pytest tests/
|
|
62
|
+
```
|
|
63
|
+
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# CortexDB Python SDK
|
|
2
|
+
|
|
3
|
+
A Python client for the CortexDB RAG backend, providing easy access to ingestion, querying, and LLM orchestration.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Setup API**: Configure LLM providers (Gemini, OpenAI, Azure).
|
|
8
|
+
- **Ingest API**: Ingest documents with automatic embedding generation.
|
|
9
|
+
- **Query API**: Perform semantic search, entity retrieval, and graph traversals.
|
|
10
|
+
- **Native LLM Integration**: Uses `google-genai` for native Gemini support.
|
|
11
|
+
|
|
12
|
+
## Installation
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
pip install -e .
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Usage
|
|
19
|
+
|
|
20
|
+
See `main.py` for a complete example of setting up a medical chatbot.
|
|
21
|
+
|
|
22
|
+
```python
|
|
23
|
+
from cortexdb import CortexDB
|
|
24
|
+
|
|
25
|
+
db = CortexDB("http://localhost:8080")
|
|
26
|
+
|
|
27
|
+
# Setup
|
|
28
|
+
db.setup.configure(provider="GEMINI", api_key="...", chat_model="gemini-2.0-flash", embed_model="gemini-embedding-001")
|
|
29
|
+
|
|
30
|
+
# Ingest
|
|
31
|
+
db.ingest.document(uid="user-1", converser="USER", content="Hello world")
|
|
32
|
+
|
|
33
|
+
# Query
|
|
34
|
+
results = db.query.search_contexts("Hello", limit=5)
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Testing
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
pip install pytest respx
|
|
41
|
+
pytest tests/
|
|
42
|
+
```
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Main CortexDB client — entry point for the Python SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from cortexdb.ingest import IngestAPI
|
|
8
|
+
from cortexdb.query import QueryAPI
|
|
9
|
+
from cortexdb.setup import SetupAPI
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class CortexDB:
    """Entry-point client for a CortexDB server.

    Exposes three sub-APIs that share one HTTP connection:

    - ``setup``  — configure the server's LLM provider
    - ``ingest`` — push documents into the database
    - ``query``  — semantic / graph / history retrieval

    Usage::

        from cortexdb import CortexDB

        db = CortexDB("http://localhost:8080")

        # Configure LLM
        db.setup.configure(
            provider="GEMINI",
            api_key="...",
            chat_model="gemini-2.0-flash",
            embed_model="gemini-embedding-001",
        )

        # Ingest content
        db.ingest.document(uid="user-1", converser="USER", content="Hello world")

        # Query
        results = db.query.search_contexts("greeting")
    """

    def __init__(
        self,
        base_url: str = "http://localhost:8080",
        timeout: float = 30.0,
        headers: dict[str, str] | None = None,
    ) -> None:
        """Create a client bound to *base_url*.

        Args:
            base_url: Base URL of the CortexDB server.
            timeout: Request timeout in seconds.
            headers: Optional default headers for all requests.
        """
        default_headers = {} if headers is None else headers
        self._http = httpx.Client(base_url=base_url, timeout=timeout, headers=default_headers)

        # All sub-APIs share the single underlying HTTP client.
        self.setup = SetupAPI(self._http)
        self.ingest = IngestAPI(self._http)
        self.query = QueryAPI(self._http)

    def close(self) -> None:
        """Close the underlying HTTP client."""
        self._http.close()

    def __enter__(self) -> CortexDB:
        return self

    def __exit__(self, *args: object) -> None:
        self.close()

    def __repr__(self) -> str:
        return f"CortexDB(base_url={self._http.base_url!r})"
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""Custom exceptions for the CortexDB Python SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class CortexDBError(Exception):
    """Base exception for all CortexDB errors."""


class ConnectionError(CortexDBError):
    """Raised when the SDK cannot connect to the CortexDB server.

    NOTE(review): this name shadows the builtin ``ConnectionError`` inside
    this module; reference it qualified (``cortexdb.exceptions.ConnectionError``)
    to avoid ambiguity.
    """


class APIError(CortexDBError):
    """Raised when the CortexDB server returns an error response.

    Attributes:
        status_code: HTTP status code returned by the server.
        message: Human-readable error message.
        detail: Optional extra detail from the server.
    """

    def __init__(self, status_code: int, message: str, detail: str | None = None):
        self.status_code = status_code
        # Fix: store the message so callers can inspect it programmatically
        # (previously it was only baked into the exception string).
        self.message = message
        self.detail = detail
        super().__init__(f"HTTP {status_code}: {message}")


class NotFoundError(APIError):
    """Raised when a requested resource is not found (404)."""

    def __init__(self, message: str = "Resource not found"):
        super().__init__(status_code=404, message=message)


class ValidationError(APIError):
    """Raised when request validation fails (400)."""

    def __init__(self, message: str = "Validation failed", detail: str | None = None):
        super().__init__(status_code=400, message=message, detail=detail)


class LLMError(CortexDBError):
    """Raised when an LLM operation fails."""


class LLMNotInitializedError(LLMError):
    """Raised when LLM provider is used before initialization."""

    def __init__(self):
        super().__init__("LLM provider not initialized. Call initialize() first.")
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""Ingest API wrapper for CortexDB."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
from cortexdb.models import ConverserRole, IngestRequest, IngestResponse
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class IngestAPI:
    """Thin wrapper around the ``/api/ingest`` endpoints."""

    def __init__(self, http: httpx.Client) -> None:
        # Shared HTTP client owned by the parent CortexDB instance.
        self._http = http

    def document(
        self,
        uid: str,
        converser: str | ConverserRole,
        content: str,
        metadata: dict[str, Any] | None = None,
    ) -> IngestResponse:
        """Ingest a document into CortexDB.

        The server will chunk the content, generate embeddings,
        and extract entities/relations automatically.

        Args:
            uid: User identifier.
            converser: Role of the converser (USER, AGENT, or SYSTEM).
            content: The text content to ingest.
            metadata: Optional metadata dict.

        Returns:
            IngestResponse with the created KnowledgeBase and processing info.
        """
        # Accept a plain string role and coerce it, case-insensitively.
        role = (
            ConverserRole(converser.upper())
            if isinstance(converser, str)
            else converser
        )

        payload = IngestRequest(
            uid=uid,
            converser=role,
            content=content,
            metadata=metadata,
        ).model_dump(exclude_none=True)

        resp = self._http.post("/api/ingest/document", json=payload)
        resp.raise_for_status()
        return IngestResponse.model_validate(resp.json())
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""Native LLM provider — Python re-implementation of callLLM() and getEmbedding()."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from cortexdb.exceptions import LLMError, LLMNotInitializedError
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class LLMProvider:
    """LLM provider that directly calls the Gemini / OpenAI APIs from Python.

    This is a native re-implementation of the Java ``LLMProvider`` class,
    using the ``google-genai`` SDK for Gemini and the ``openai`` SDK for
    OpenAI/Azure providers.

    Usage::

        from cortexdb.llm import LLMProvider

        llm = LLMProvider(
            provider="GEMINI",
            api_key="your-api-key",
            chat_model="gemini-2.0-flash",
            embed_model="gemini-embedding-001",
        )

        embedding = llm.get_embedding("Hello world")
        response = llm.call_llm("Explain quantum computing")
    """

    def __init__(
        self,
        provider: str,
        api_key: str,
        chat_model: str | None = None,
        embed_model: str | None = None,
        base_url: str | None = None,
    ) -> None:
        """Initialize the LLM provider.

        Args:
            provider: Provider name — "GEMINI", "OPENAI", or "AZURE".
            api_key: API key for authentication.
            chat_model: Name of the chat model.
            embed_model: Name of the embedding model.
            base_url: Custom base URL (optional, mainly for Azure).

        Raises:
            ValueError: If ``provider`` is not a supported name.
            LLMError: If the required SDK package is not installed.
        """
        self.provider = provider.upper()  # normalize so "gemini" works too
        self.api_key = api_key
        self.chat_model = chat_model
        self.embed_model = embed_model
        self.base_url = base_url

        self._chat_client: Any = None
        self._embed_client: Any = None

        self._initialize()

    def _initialize(self) -> None:
        """Set up the underlying SDK clients based on the provider."""
        logger.info(
            "Initializing LLMProvider with provider=%s, chat_model=%s, embed_model=%s",
            self.provider,
            self.chat_model,
            self.embed_model,
        )

        if self.provider == "GEMINI":
            self._init_gemini()
        elif self.provider in ("OPENAI", "AZURE"):
            self._init_openai()
        else:
            raise ValueError(f"Unsupported provider: {self.provider}")

    def _init_gemini(self) -> None:
        """Initialize using the google-genai SDK."""
        try:
            from google import genai
        except ImportError as exc:
            # Fix: chain the original ImportError so the real cause of the
            # missing package is preserved in tracebacks (PEP 3134).
            raise LLMError(
                "google-genai is required for Gemini provider. "
                "Install it with: pip install google-genai"
            ) from exc

        self._gemini_client = genai.Client(api_key=self.api_key)
        # The single genai client serves both chat and embedding calls.
        self._chat_client = self._gemini_client
        self._embed_client = self._gemini_client
        logger.info("Gemini client initialized successfully")

    def _init_openai(self) -> None:
        """Initialize using the openai SDK (works for OpenAI and Azure)."""
        try:
            import openai
        except ImportError as exc:
            # Fix: chain the original ImportError (see _init_gemini).
            raise LLMError(
                "openai is required for OpenAI/Azure provider. "
                "Install it with: pip install openai"
            ) from exc

        if self.provider == "AZURE":
            self._openai_client = openai.AzureOpenAI(
                api_key=self.api_key,
                azure_endpoint=self.base_url or "",
                api_version="2024-02-01",
            )
        else:
            self._openai_client = openai.OpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
            )
        self._chat_client = self._openai_client
        self._embed_client = self._openai_client
        logger.info("OpenAI client initialized successfully")

    def get_embedding(self, text: str) -> list[float]:
        """Generate an embedding vector for the given text.

        Args:
            text: Input text to embed.

        Returns:
            A list of floats representing the embedding vector.

        Raises:
            LLMNotInitializedError: If the provider is not initialized.
            LLMError: If embedding generation fails.
        """
        if self._embed_client is None:
            raise LLMNotInitializedError()

        logger.debug("Generating embedding for text (%d chars)", len(text))

        try:
            if self.provider == "GEMINI":
                response = self._gemini_client.models.embed_content(
                    model=self.embed_model or "gemini-embedding-001",
                    contents=text,
                )
                # google-genai returns a batch; we embed one text, take [0].
                return list(response.embeddings[0].values)
            else:
                response = self._openai_client.embeddings.create(
                    model=self.embed_model or "text-embedding-ada-002",
                    input=text,
                )
                return response.data[0].embedding
        except Exception as e:
            logger.error("Embedding generation failed: %s", e)
            raise LLMError(f"Embedding generation failed: {e}") from e

    def call_llm(self, prompt: str) -> str:
        """Send a prompt to the LLM and return the response text.

        Args:
            prompt: The prompt to send.

        Returns:
            The LLM's text response.

        Raises:
            LLMNotInitializedError: If the provider is not initialized.
            LLMError: If the LLM call fails.
        """
        if self._chat_client is None:
            raise LLMNotInitializedError()

        logger.debug("Calling LLM with prompt (%d chars)", len(prompt))

        try:
            if self.provider == "GEMINI":
                response = self._gemini_client.models.generate_content(
                    model=self.chat_model or "gemini-2.0-flash",
                    contents=prompt,
                )
                return response.text
            else:
                response = self._openai_client.chat.completions.create(
                    model=self.chat_model or "gpt-4",
                    messages=[{"role": "user", "content": prompt}],
                )
                return response.choices[0].message.content
        except Exception as e:
            logger.error("LLM call failed: %s", e)
            raise LLMError(f"LLM call failed: {e}") from e
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
"""Pydantic data models mirroring the Java DTOs."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from enum import Enum
|
|
7
|
+
from typing import Any
|
|
8
|
+
from uuid import UUID
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# ─── Enums ───────────────────────────────────────────────────────────
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class LLMApiProvider(str, Enum):
    """Supported LLM API provider identifiers (wire value equals member name)."""

    OPENAI = "OPENAI"
    AZURE = "AZURE"
    OLLAMA = "OLLAMA"
    ANTHROPIC = "ANTHROPIC"
    GEMINI = "GEMINI"
    MISTRAL = "MISTRAL"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class ConverserRole(str, Enum):
    """Role of the party that produced a piece of ingested content."""

    USER = "USER"  # Content from a human user.
    AGENT = "AGENT"  # Content generated by the AI assistant.
    SYSTEM = "SYSTEM"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# ─── Setup ───────────────────────────────────────────────────────────
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class SetupRequest(BaseModel):
    """Request body for configuring the server's LLM provider.

    Wire format uses camelCase aliases (e.g. ``apiKey``) to match the Java DTOs.
    """

    provider: LLMApiProvider
    api_key: str | None = Field(default=None, alias="apiKey")
    chat_model_name: str = Field(alias="chatModelName")
    embed_model_name: str = Field(alias="embedModelName")
    base_url: str | None = Field(default=None, alias="baseUrl")

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class SetupResponse(BaseModel):
    """Server response to a setup/configure request (camelCase wire aliases)."""

    message: str
    success: bool
    configured_provider: str | None = Field(default=None, alias="configuredProvider")
    configured_chat_model: str | None = Field(
        default=None, alias="configuredChatModel"
    )
    configured_embed_model: str | None = Field(
        default=None, alias="configuredEmbedModel"
    )
    base_url: str | None = Field(default=None, alias="baseUrl")
    timestamp: datetime | None = None

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# ─── Ingest ──────────────────────────────────────────────────────────
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class IngestRequest(BaseModel):
    """Request body for ``POST /api/ingest/document``."""

    uid: str  # User identifier.
    converser: ConverserRole
    content: str  # Raw text to be chunked/embedded by the server.
    metadata: dict[str, Any] | None = None
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class KnowledgeBase(BaseModel):
    """Simplified KnowledgeBase entity returned in ingest responses."""

    id: UUID | None = None
    uid: str | None = None
    content: str | None = None
    created_at: datetime | None = Field(default=None, alias="createdAt")

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class IngestResponse(BaseModel):
    """Server response to a document-ingest request."""

    knowledge_base: KnowledgeBase | None = Field(
        default=None, alias="knowledgeBase"
    )
    status: str | None = None
    message: str | None = None
    processing_time_ms: int | None = Field(default=None, alias="processingTimeMs")
    embedding_time_ms: int | None = Field(default=None, alias="embeddingTimeMs")

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
# ─── Query ───────────────────────────────────────────────────────────
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
class QueryRequest(BaseModel):
    """Request body shared by the semantic-search POST endpoints."""

    query: str
    limit: int = 5  # Maximum number of results to return.
    min_relevance: float = Field(default=0.7, alias="minRelevance")
    filters: dict[str, Any] | None = None

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class SearchResult(BaseModel):
    """Single hit in a query response."""

    id: UUID | None = None
    content: str | None = None
    score: float | None = None  # Relevance score reported by the server.
    type: str | None = None
    metadata: dict[str, Any] | None = None
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
class QueryResponse(BaseModel):
    """Envelope returned by the query endpoints."""

    query: str | None = None  # Echo of the query, when the server provides it.
    results: list[SearchResult] = Field(default_factory=list)
    processing_time_ms: int | None = Field(default=None, alias="processingTimeMs")

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
# ─── Entity / Relation ───────────────────────────────────────────────
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
class Entity(BaseModel):
    """RagEntity returned from entity endpoints."""

    id: UUID | None = None
    name: str | None = None
    type: str | None = None
    description: str | None = None
    embedding: list[float] | None = None  # Present only when the server includes vectors.

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class Relation(BaseModel):
    """Relation between two entities."""

    id: UUID | None = None
    source_entity_id: UUID | None = Field(default=None, alias="sourceEntityId")
    target_entity_id: UUID | None = Field(default=None, alias="targetEntityId")
    relation_type: str | None = Field(default=None, alias="relationType")
    weight: float | None = None  # Relation strength; semantics defined server-side — TODO confirm.

    # Accept both snake_case attribute names and camelCase aliases on input.
    model_config = {"populate_by_name": True}
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
"""Query API wrapper for CortexDB."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
from uuid import UUID
|
|
7
|
+
|
|
8
|
+
from cortexdb.exceptions import NotFoundError
|
|
9
|
+
from cortexdb.models import Entity, QueryRequest, QueryResponse, Relation
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
import httpx
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class QueryAPI:
|
|
16
|
+
"""Wraps the ``/api/query`` endpoints."""
|
|
17
|
+
|
|
18
|
+
    def __init__(self, http: httpx.Client) -> None:
        # Shared httpx.Client owned by the parent CortexDB instance.
        self._http = http

    # ── Context endpoints ────────────────────────────────────────────

    def search_contexts(
        self,
        query: str,
        limit: int = 5,
        min_relevance: float = 0.7,
        filters: dict[str, Any] | None = None,
    ) -> QueryResponse:
        """Semantic search on contexts.

        Args:
            query: Search query text.
            limit: Maximum number of results.
            min_relevance: Minimum relevance score (0-1).
            filters: Optional filters dict.

        Returns:
            QueryResponse with matching contexts.
        """
        return self._post_query("/api/query/contexts", query, limit, min_relevance, filters)

    def get_contexts_by_kb(self, kb_id: UUID | str) -> QueryResponse:
        """Get all contexts for a knowledge base."""
        resp = self._http.get(f"/api/query/contexts/kb/{kb_id}")
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())

    def get_recent_contexts(self, days: int = 7) -> QueryResponse:
        """Get recent contexts from the last N days."""
        resp = self._http.get("/api/query/contexts/recent", params={"days": days})
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())

    def search_recent_contexts(
        self,
        query: str,
        days: int = 7,
        limit: int = 5,
        min_relevance: float = 0.7,
    ) -> QueryResponse:
        """Search recent contexts with vector similarity.

        The time window is passed as a ``days`` query parameter; no body
        filters are supported on this endpoint.
        """
        return self._post_query(
            "/api/query/contexts/recent/search",
            query, limit, min_relevance,
            params={"days": days},
        )

    def get_sibling_contexts(self, context_id: UUID | str) -> QueryResponse:
        """Get sibling contexts (other chunks from same document)."""
        resp = self._http.get(f"/api/query/contexts/{context_id}/siblings")
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())
|
|
74
|
+
|
|
75
|
+
    # ── Entity endpoints ─────────────────────────────────────────────

    def search_entities(
        self,
        query: str,
        limit: int = 5,
        min_relevance: float = 0.7,
    ) -> QueryResponse:
        """Semantic search on entities.

        Args:
            query: Search query text.
            limit: Maximum number of results.
            min_relevance: Minimum relevance score (0-1).
        """
        return self._post_query("/api/query/entities", query, limit, min_relevance)

    def get_entity_by_name(self, name: str) -> Entity | None:
        """Find entity by exact name. Returns None if not found."""
        resp = self._http.get(f"/api/query/entities/name/{name}")
        # 404 is an expected "no such entity" outcome here, not an error.
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        return Entity.model_validate(resp.json())

    def get_entity_by_name_ignore_case(self, name: str) -> Entity | None:
        """Find entity by name (case-insensitive). Returns None if not found."""
        resp = self._http.get(f"/api/query/entities/name/{name}/ignorecase")
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        return Entity.model_validate(resp.json())

    def get_entity_id_by_name(self, name: str) -> UUID | None:
        """Get entity ID by name. Returns None if not found."""
        resp = self._http.get(f"/api/query/entities/id/{name}")
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        data = resp.json()
        # Response is a JSON object carrying an "id" field; parse it as a UUID.
        return UUID(str(data.get("id")))

    def disambiguate_entity(self, entity_name: str, context_text: str) -> Entity | None:
        """Disambiguate entity using vector similarity with context.

        Sends the raw context text as a ``text/plain`` body; the candidate
        name travels as a query parameter. Returns None on 404.
        """
        resp = self._http.post(
            "/api/query/entities/disambiguate",
            params={"entityName": entity_name},
            content=context_text,
            headers={"Content-Type": "text/plain"},
        )
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        return Entity.model_validate(resp.json())

    def get_contexts_for_entity(self, entity_id: UUID | str) -> list[Any]:
        """Get all contexts where an entity is mentioned.

        Returns the raw JSON payload; the context schema is not modeled
        client-side.
        """
        resp = self._http.get(f"/api/query/entities/{entity_id}/contexts")
        resp.raise_for_status()
        return resp.json()

    def get_entities_for_context(self, context_id: UUID | str) -> list[Entity]:
        """Get all entities mentioned in a context."""
        resp = self._http.get(f"/api/query/contexts/{context_id}/entities")
        resp.raise_for_status()
        return [Entity.model_validate(e) for e in resp.json()]

    def merge_entities(self, source_entity_id: UUID | str, target_entity_id: UUID | str) -> None:
        """Merge two entities (source into target)."""
        resp = self._http.post(
            "/api/query/entities/merge",
            params={
                "sourceEntityId": str(source_entity_id),
                "targetEntityId": str(target_entity_id),
            },
        )
        resp.raise_for_status()
|
|
146
|
+
|
|
147
|
+
    # ── History endpoints ────────────────────────────────────────────

    def search_history(
        self,
        query: str,
        limit: int = 5,
        min_relevance: float = 0.7,
    ) -> QueryResponse:
        """Semantic search on knowledge bases (history).

        Args:
            query: Search query text.
            limit: Maximum number of results.
            min_relevance: Minimum relevance score (0-1).
        """
        return self._post_query("/api/query/history", query, limit, min_relevance)

    def get_history_by_user(self, uid: str) -> QueryResponse:
        """Get all history for a user."""
        resp = self._http.get(f"/api/query/history/user/{uid}")
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())

    def get_recent_kbs(self, hours: int = 24) -> QueryResponse:
        """Get recent knowledge bases from the last N hours."""
        resp = self._http.get("/api/query/history/recent", params={"hours": hours})
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())

    def get_kbs_since(self, since: str) -> QueryResponse:
        """Get knowledge bases since a timestamp (ISO-8601 string)."""
        resp = self._http.get("/api/query/history/since", params={"since": since})
        resp.raise_for_status()
        return QueryResponse.model_validate(resp.json())

    # ── User data ────────────────────────────────────────────────────

    def delete_user_data(self, uid: str) -> None:
        """Delete all data for a user (GDPR compliance)."""
        resp = self._http.delete(f"/api/query/user/{uid}")
        resp.raise_for_status()
|
|
182
|
+
|
|
183
|
+
# ── Graph endpoints ──────────────────────────────────────────────
|
|
184
|
+
|
|
185
|
+
def get_outgoing_connections(self, entity_id: UUID | str) -> QueryResponse:
|
|
186
|
+
"""Get outgoing relations for an entity."""
|
|
187
|
+
resp = self._http.get(f"/api/query/graph/outgoing/{entity_id}")
|
|
188
|
+
resp.raise_for_status()
|
|
189
|
+
return QueryResponse.model_validate(resp.json())
|
|
190
|
+
|
|
191
|
+
def get_incoming_connections(self, entity_id: UUID | str) -> QueryResponse:
|
|
192
|
+
"""Get incoming relations for an entity."""
|
|
193
|
+
resp = self._http.get(f"/api/query/graph/incoming/{entity_id}")
|
|
194
|
+
resp.raise_for_status()
|
|
195
|
+
return QueryResponse.model_validate(resp.json())
|
|
196
|
+
|
|
197
|
+
def get_two_hop_connections(self, entity_id: UUID | str) -> list[str]:
|
|
198
|
+
"""Get 2-hop connections (entity names reachable in 2 hops)."""
|
|
199
|
+
resp = self._http.get(f"/api/query/graph/two-hop/{entity_id}")
|
|
200
|
+
resp.raise_for_status()
|
|
201
|
+
return resp.json()
|
|
202
|
+
|
|
203
|
+
def get_top_relations(self, limit: int = 10) -> list[Relation]:
|
|
204
|
+
"""Get top/strongest relations."""
|
|
205
|
+
resp = self._http.get("/api/query/graph/top-relations", params={"limit": limit})
|
|
206
|
+
resp.raise_for_status()
|
|
207
|
+
return [Relation.model_validate(r) for r in resp.json()]
|
|
208
|
+
|
|
209
|
+
def get_relations_by_source(self, source_id: UUID | str) -> list[Relation]:
    """List relations whose source entity is *source_id*."""
    response = self._http.get(f"/api/query/graph/relations/source/{source_id}")
    response.raise_for_status()
    return [Relation.model_validate(item) for item in response.json()]
|
|
214
|
+
|
|
215
|
+
def get_relations_by_target(self, target_id: UUID | str) -> list[Relation]:
    """List relations whose target entity is *target_id*."""
    response = self._http.get(f"/api/query/graph/relations/target/{target_id}")
    response.raise_for_status()
    return [Relation.model_validate(item) for item in response.json()]
|
|
220
|
+
|
|
221
|
+
def get_relations_by_type(self, relation_type: str) -> list[Relation]:
    """List relations of the given *relation_type*."""
    response = self._http.get(f"/api/query/graph/relations/type/{relation_type}")
    response.raise_for_status()
    return [Relation.model_validate(item) for item in response.json()]
|
|
226
|
+
|
|
227
|
+
# ── Hybrid search ────────────────────────────────────────────────
|
|
228
|
+
|
|
229
|
+
def hybrid_search(
    self,
    query: str,
    limit: int = 5,
    min_relevance: float = 0.7,
) -> QueryResponse:
    """Run a hybrid search that merges vector-similarity and graph results.

    Args:
        query: Natural-language query text.
        limit: Maximum number of results to return.
        min_relevance: Minimum relevance score for returned items.
    """
    endpoint = "/api/query/hybrid"
    return self._post_query(endpoint, query, limit, min_relevance)
|
|
237
|
+
|
|
238
|
+
# ── Internal helpers ─────────────────────────────────────────────
|
|
239
|
+
|
|
240
|
+
def _post_query(
    self,
    url: str,
    query: str,
    limit: int = 5,
    min_relevance: float = 0.7,
    filters: dict[str, Any] | None = None,
    params: dict[str, Any] | None = None,
) -> QueryResponse:
    """POST a query request to *url* and parse the reply.

    Args:
        url: Endpoint path, e.g. ``"/api/query/hybrid"``.
        query: Natural-language query text.
        limit: Maximum number of results.
        min_relevance: Minimum relevance score for returned items.
        filters: Optional server-side filters, forwarded in the body.
        params: Optional extra query-string parameters.

    Returns:
        The parsed :class:`QueryResponse`.
    """
    # Serialize via the pydantic model so field aliases (camelCase)
    # match the server's API contract, dropping unset optionals.
    body = QueryRequest(
        query=query,
        limit=limit,
        minRelevance=min_relevance,
        filters=filters,
    ).model_dump(by_alias=True, exclude_none=True)
    response = self._http.post(url, json=body, params=params)
    response.raise_for_status()
    return QueryResponse.model_validate(response.json())
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""Setup API wrapper for CortexDB."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING
|
|
6
|
+
|
|
7
|
+
from cortexdb.models import LLMApiProvider, SetupRequest, SetupResponse
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SetupAPI:
    """Wraps the ``/api/setup`` endpoint."""

    def __init__(self, http: httpx.Client) -> None:
        # Shared httpx client owned by the top-level CortexDB client.
        self._http = http

    def configure(
        self,
        provider: str | LLMApiProvider,
        chat_model: str,
        embed_model: str,
        api_key: str | None = None,
        base_url: str | None = None,
    ) -> SetupResponse:
        """Configure the LLM provider on the CortexDB server.

        Args:
            provider: LLM provider name (e.g. "GEMINI", "OPENAI", "AZURE").
            chat_model: Name of the chat model.
            embed_model: Name of the embedding model.
            api_key: API key for the provider.
            base_url: Custom base URL (optional).

        Returns:
            SetupResponse with configuration details.
        """
        # Accept plain strings for convenience; uppercase them so they
        # coerce into the LLMApiProvider enum.
        resolved = (
            LLMApiProvider(provider.upper())
            if isinstance(provider, str)
            else provider
        )

        payload = SetupRequest(
            provider=resolved,
            apiKey=api_key,
            chatModelName=chat_model,
            embedModelName=embed_model,
            baseUrl=base_url,
        ).model_dump(by_alias=True, exclude_none=True)

        response = self._http.post("/api/setup", json=payload)
        response.raise_for_status()
        return SetupResponse.model_validate(response.json())
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "cortexdb-py"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Python SDK for CortexDB — a RAG-powered memory database"
|
|
5
|
+
authors = ["VectorNode"]
|
|
6
|
+
readme = "README.md"
|
|
7
|
+
packages = [{include = "cortexdb"}]
|
|
8
|
+
|
|
9
|
+
[tool.poetry.dependencies]
|
|
10
|
+
python = "^3.9"
|
|
11
|
+
httpx = "^0.28.0"
|
|
12
|
+
pydantic = "^2.0"
|
|
13
|
+
google-genai = "^1.0.0"
|
|
14
|
+
python-dotenv = "^1.0.0"
|
|
15
|
+
eval-type-backport = "^0.3.1"
|
|
16
|
+
|
|
17
|
+
[tool.poetry.group.dev.dependencies]
|
|
18
|
+
pytest = "^8.0"
|
|
19
|
+
pytest-asyncio = "^0.25.0"
|
|
20
|
+
respx = "^0.22.0"
|
|
21
|
+
|
|
22
|
+
[build-system]
|
|
23
|
+
requires = ["poetry-core"]
|
|
24
|
+
build-backend = "poetry.core.masonry.api"
|
|
25
|
+
|
|
26
|
+
[tool.pytest.ini_options]
|
|
27
|
+
testpaths = ["tests"]
|