crewplus 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of crewplus might be problematic.
- crewplus/vectorstores/milvus/vdb_service.py +45 -11
- {crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/METADATA +1 -1
- {crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/RECORD +10 -6
- docs/GeminiChatModel.md +226 -0
- docs/ModelLoadBalancer.md +90 -0
- docs/VDBService.md +223 -0
- docs/index.md +23 -0
- {crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/WHEEL +0 -0
- {crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/entry_points.txt +0 -0
- {crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/licenses/LICENSE +0 -0
crewplus/vectorstores/milvus/vdb_service.py
CHANGED

```diff
@@ -22,17 +22,22 @@ class VDBService(object):
     and provides helper methods to get embedding functions and vector store instances.
 
     Args:
-        settings (dict): A dictionary containing configuration for the vector store
+        settings (dict, optional): A dictionary containing configuration for the vector store
             and embedding models.
+        endpoint (str, optional): The URI for the Zilliz cluster. Can be used for simple
+            initialization instead of `settings`.
+        token (str, optional): The token for authenticating with Zilliz. Must be provided
+            with `endpoint`.
         schema (str, optional): The schema definition for a collection. Defaults to None.
         logger (logging.Logger, optional): An optional logger instance. Defaults to None.
 
     Raises:
-        ValueError: If required configurations are missing
+        ValueError: If required configurations are missing.
         NotImplementedError: If an unsupported provider is specified.
         RuntimeError: If the MilvusClient fails to initialize after a retry.
 
     Example:
+        >>> # Initialize with a full settings dictionary
        >>> settings = {
         ...     "embedder": {
         ...         "provider": "azure-openai",
@@ -61,6 +66,10 @@ class VDBService(object):
         ...     }
         ... }
         >>> vdb_service = VDBService(settings=settings)
+        >>>
+        >>> # Alternatively, initialize with an endpoint and token for Zilliz
+        >>> # vdb_service_zilliz = VDBService(endpoint="YOUR_ZILLIZ_ENDPOINT", token="YOUR_ZILLIZ_TOKEN")
+        >>>
         >>> # Get the raw Milvus client
         >>> client = vdb_service.get_vector_client()
         >>> print(client.list_collections())
@@ -82,17 +91,41 @@ class VDBService(object):
     connection_args: dict
     settings: dict
 
-    def __init__(self, settings: dict, schema: str = None, logger: logging.Logger = None):
+    def __init__(self, settings: dict = None, endpoint: str = None, token: str = None, schema: str = None, logger: logging.Logger = None):
         """
         Initializes the VDBService.
+
+        Can be initialized in two ways:
+        1. By providing a full `settings` dictionary for complex configurations.
+        2. By providing `endpoint` and `token` for a direct Zilliz connection.
+           Note: When using this method, an `embedder` configuration is not created.
+           You must either use the `ModelLoadBalancer` or pass an `Embeddings` object
+           directly to methods like `get_vector_store`.
 
         Args:
-            settings (dict): Configuration dictionary for the service.
+            settings (dict, optional): Configuration dictionary for the service. Defaults to None.
+            endpoint (str, optional): The URI for the Zilliz cluster. Used if `settings` is not provided.
+            token (str, optional): The token for authenticating with the Zilliz cluster.
             schema (str, optional): Default schema for new collections. Defaults to None.
             logger (logging.Logger, optional): Logger instance. Defaults to None.
         """
         self.logger = logger or logging.getLogger(__name__)
-
+
+        if settings:
+            self.settings = settings
+        elif endpoint and token:
+            self.logger.info("Initializing VDBService with endpoint and token for a Zilliz connection.")
+            self.settings = {
+                "vector_store": {
+                    "provider": "zilliz",
+                    "config": {
+                        "uri": endpoint,
+                        "token": token
+                    }
+                }
+            }
+        else:
+            raise ValueError("VDBService must be initialized with either a 'settings' dictionary or both 'endpoint' and 'token'.")
 
         vector_store_settings = self.settings.get("vector_store")
         if not vector_store_settings:
@@ -262,6 +295,7 @@ class VDBService(object):
 
         Args:
             url (str): source url
+            vdb (Zilliz): Zilliz instance
         """
         self.logger.info(f"Delete old indexes of the same source_url:{url}")
 
@@ -269,7 +303,7 @@ class VDBService(object):
             return None
 
         # Delete indexes of the same source_url
-        expr =
+        expr = f'source_url == "{url}" or source == "{url}"'
         pks = vdb.get_pks(expr)
 
         # Delete entities by pks
@@ -278,19 +312,19 @@ class VDBService(object):
         self.logger.info("Deleted old indexes result: " + str(res))
         return res
 
-    def delete_old_indexes_by_id(self,
+    def delete_old_indexes_by_id(self, source_id: str = None, vdb: Zilliz = None) -> (bool | None):
         """ Delete old indexes of the same source_id
 
         Args:
-
+            source_id (str): source id
         """
-        self.logger.info(f"Delete old indexes of the same source_id:{
+        self.logger.info(f"Delete old indexes of the same source_id:{source_id}")
 
-        if
+        if source_id is None or vdb is None:
             return None
 
         # Delete indexes of the same source_id
-        expr =
+        expr = f'source_id == "{source_id}"'
         pks = vdb.get_pks(expr)
 
         # Delete entities by pks
```
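Taken together, these hunks add a second construction path: `VDBService` can now be built from Zilliz credentials alone instead of a full `settings` dictionary. A minimal sketch of how the two pieces fit, assuming placeholder credentials and `langchain_openai.AzureOpenAIEmbeddings` as one of many possible `Embeddings` implementations (neither appears in this diff):

```python
from crewplus.vectorstores.milvus.vdb_service import VDBService
from langchain_openai import AzureOpenAIEmbeddings  # illustrative choice, not part of the diff

# Credential-only construction: per the new __init__, this synthesizes a
# {"vector_store": {"provider": "zilliz", ...}} settings dict internally.
service = VDBService(endpoint="YOUR_ZILLIZ_ENDPOINT", token="YOUR_ZILLIZ_TOKEN")

# This mode creates no `embedder` configuration, so the new docstring says to
# supply an Embeddings object (or use the ModelLoadBalancer) when asking for a store.
embeddings = AzureOpenAIEmbeddings(model="text-embedding-3-small")  # Azure credentials come from env vars
store = service.get_vector_store(collection_name="my_documents", embeddings=embeddings)
```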
{crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/RECORD
CHANGED

```diff
@@ -1,7 +1,7 @@
-crewplus-0.2.
-crewplus-0.2.
-crewplus-0.2.
-crewplus-0.2.
+crewplus-0.2.4.dist-info/METADATA,sha256=GanuWO7TBC2DPca7TZwW3WtVg7rZXpBQ1hWhM22G_-8,4909
+crewplus-0.2.4.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+crewplus-0.2.4.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+crewplus-0.2.4.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
 crewplus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 crewplus/services/__init__.py,sha256=MmH2v3N0ZMsuqFNAupkXENjUqvgf5ehQ99H6EzPqLZU,48
 crewplus/services/gemini_chat_model.py,sha256=i9p5KvSJYaHSUBLPKM_bpyGVLWCDQoNeah_WjQVJRXs,26227
@@ -12,5 +12,9 @@ crewplus/utils/schema_document_updater.py,sha256=frvffxn2vbi71fHFPoGb9hq7gH2azmm
 crewplus/vectorstores/milvus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 crewplus/vectorstores/milvus/milvus_schema_manager.py,sha256=qHMVIM0NS3rLfACb8d3-tQS9hJo6_7_YP8AxVx4t1Cc,9019
 crewplus/vectorstores/milvus/schema_milvus.py,sha256=GhHTtCH5HsIJc3RHa25RXl3aZdkS3Rba5KeuUk_Hi0k,11425
-crewplus/vectorstores/milvus/vdb_service.py,sha256=
-
+crewplus/vectorstores/milvus/vdb_service.py,sha256=J7B6TOZmJl9_K2euJFKJFvSYqvruKbXuYkFiugWnXXs,16657
+docs/GeminiChatModel.md,sha256=_IQyup3ofAa2HxfSurO1GYUEezTHYYt5Q1khYNVThGM,8040
+docs/ModelLoadBalancer.md,sha256=mgwDtiKBlAJMBhXck97SPahCt395QJzHyrKmxmkfRtw,3082
+docs/VDBService.md,sha256=YwYpyYsZ-YkLD8WQjFYAHmEkPmVheTKUEJn0mVqrirA,8945
+docs/index.md,sha256=3tlc15uR8lzFNM5WjdoZLw0Y9o1P1gwgbEnOdIBspqc,1643
+crewplus-0.2.4.dist-info/RECORD,,
```
docs/GeminiChatModel.md
ADDED

# GeminiChatModel Documentation

## 1. Introduction

The `GeminiChatModel` is a custom LangChain-compatible chat model that provides a robust interface to Google's Gemini Pro and Flash models. It is designed to handle multimodal inputs, including text, images, and videos, making it a versatile tool for building advanced AI applications.

### Key Features:
- **LangChain Compatibility**: Seamlessly integrates into the LangChain ecosystem as a `BaseChatModel`.
- **Multimodal Support**: Natively processes text, images (from URLs, local paths, or base64), and videos (from local paths, Google Cloud URIs, or raw bytes).
- **Streaming**: Supports streaming for both standard and multimodal responses.
- **Advanced Configuration**: Allows fine-tuning of generation parameters like temperature, top-p, top-k, and max tokens.
- **Video Segment Analysis**: Can process specific time ranges within a video using start and end offsets.

## 2. Installation

To use the `GeminiChatModel`, you need to install the `crewplus` package:

```bash
pip install crewplus
```

If you are working within the project repository, you can install it in editable mode with `pip install -e .` instead.

## 3. Initialization

First, ensure you have set your Google API key as an environment variable:

```bash
# For Linux/macOS
export GOOGLE_API_KEY="YOUR_API_KEY"

# For Windows PowerShell
$env:GOOGLE_API_KEY = "YOUR_API_KEY"
```

Then, you can import and initialize the model in your Python code.

```python
import logging
from crewplus.services import GeminiChatModel
from langchain_core.messages import HumanMessage

# Optional: Configure a logger for detailed output
logging.basicConfig(level=logging.INFO)
test_logger = logging.getLogger(__name__)

# Initialize the model
# You can also pass the google_api_key directly as a parameter
model = GeminiChatModel(
    model_name="gemini-2.5-flash",  # Or "gemini-1.5-pro"
    logger=test_logger,
    temperature=0.0,
)
```
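The feature list above also mentions tuning top-p, top-k, and max tokens, which the example does not show. A hedged sketch follows; the keyword names `top_p`, `top_k`, and `max_output_tokens` mirror common Google GenAI conventions and are assumptions about this custom class, so verify them against the `GeminiChatModel` signature before use:

```python
# Hedged sketch: generation-parameter tuning at construction time.
# top_p / top_k / max_output_tokens are assumed keyword names (Google GenAI style);
# only model_name, logger, and temperature are confirmed by the example above.
tuned_model = GeminiChatModel(
    model_name="gemini-2.5-flash",
    temperature=0.2,         # lower temperature gives more deterministic output
    top_p=0.95,              # nucleus-sampling cutoff (assumed parameter name)
    top_k=40,                # candidate-token cutoff (assumed parameter name)
    max_output_tokens=1024,  # cap on response length (assumed parameter name)
)
```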
## 4. Basic Usage (Text-only)

The model can be used for simple text-based conversations using `.invoke()` or `.stream()`.

```python
# Using invoke for a single response
response = model.invoke("Hello, how are you?")
print(response.content)

# Using stream for a chunked response
print("\n--- Streaming Response ---")
for chunk in model.stream("Tell me a short story."):
    print(chunk.content, end="", flush=True)
```

## 5. Image Understanding

`GeminiChatModel` can understand images provided via a URL or as base64 encoded data.

### Example 1: Image from a URL

You can provide a direct URL to an image.

```python
from langchain_core.messages import HumanMessage

url_message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image:"},
        {
            "type": "image_url",
            "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        },
    ]
)
url_response = model.invoke([url_message])
print("Image response (URL):", url_response.content)
```
> **Sample Output:**
> The image shows a wooden boardwalk stretching into the distance through a field of tall, green grass... The overall impression is one of tranquility and natural beauty.

### Example 2: Local Image (Base64)

You can also send a local image file by encoding it in base64.

```python
import base64
from langchain_core.messages import HumanMessage

image_path = "./notebooks/test_image_202506191.jpg"
try:
    with open(image_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')

    image_message = HumanMessage(
        content=[
            {"type": "text", "text": "Describe this photo and its background story."},
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{encoded_string}"
                }
            },
        ]
    )
    image_response = model.invoke([image_message])
    print("Image response (base64):", image_response.content)
except FileNotFoundError:
    print(f"Image file not found at {image_path}, skipping base64 example.")
```
> **Sample Output:**
> This image is a movie still from the 2017 Japanese thriller "22 Year Old's Confession: I am the Murderer"... The four women in the photo are the victims of a serial killer...

## 6. Video Understanding

The model supports video analysis from uploaded files, URIs, and raw bytes.

**Important Note:** The Gemini API does **not** support common public video URLs (e.g., YouTube, Loom, or public MP4 links). Videos must be uploaded to Google's servers first to get a processable URI.

### Example 1: Large Video File (>20MB)

For large videos, you must first upload the file using the `google-genai` client to get a file object.

```python
from google import genai
import os
from langchain_core.messages import HumanMessage

# Initialize the Google GenAI client
client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])

# Upload the video file
video_path = "./notebooks/manufacturing_process_tutorial.mp4"
print("Uploading video... this may take a moment.")
video_file_obj = client.files.upload(file=video_path)
print(f"Video uploaded successfully. File name: {video_file_obj.name}")

# Use the uploaded file object in the prompt
video_message = HumanMessage(
    content=[
        {"type": "text", "text": "Summarize this video and provide timestamps for key events."},
        {"type": "video_file", "file": video_file_obj},
    ]
)
video_response = model.invoke([video_message])
print("Video response:", video_response.content)
```

> **Sample Output:**
> This video provides a step-by-step guide on how to correct a mis-set sidewall during tire manufacturing...
> **Timestamps:**
> * **0:04:** Applying product package to some material
> * **0:12:** Splice product Together and Prepare some material
> ...

### Example 2: Video with Time Offsets

You can analyze just a specific portion of a video by providing a `start_offset` and `end_offset`. This works with video URIs obtained after uploading.

```python
# Assuming 'video_file_obj' is available from the previous step
video_uri = video_file_obj.uri

offset_message = HumanMessage(
    content=[
        {"type": "text", "text": "Transcribe the events in this video segment."},
        {
            "type": "video_file",
            "url": video_uri,
            "start_offset": "5s",
            "end_offset": "30s"
        }
    ]
)

print("Streaming response for video segment:")
for chunk in model.stream([offset_message]):
    print(chunk.content, end="", flush=True)
```
> **Sample Output:**
> This video demonstrates the process of applying Component A/Component B material to an assembly drum in a manufacturing setting...
> **Transcription:**
> **0:05 - 0:12:** A worker is shown applying a material...
> **0:12 - 0:23:** The worker continues to prepare the material on the drum...

### Example 3: Small Video File (<20MB)

For small videos, you can pass the raw bytes directly without a separate upload step.

```python
from langchain_core.messages import HumanMessage

try:
    with open("./notebooks/product_demo_v1.mp4", "rb") as video_file:
        video_bytes = video_file.read()

    video_message = HumanMessage(
        content=[
            {"type": "text", "text": "What is happening in this video?"},
            {
                "type": "video_file",
                "data": video_bytes,
                "mime_type": "video/mp4"  # Mime type is required for raw data
            },
        ]
    )
    video_response = model.invoke([video_message])
    print("Video response (bytes):", video_response.content)
except FileNotFoundError:
    print("Video file not found.")
except Exception as e:
    print(f"Video processing with bytes failed: {e}")
```
docs/ModelLoadBalancer.md
ADDED

# ModelLoadBalancer Documentation

## 1. Introduction

The `ModelLoadBalancer` is a utility class designed to manage and provide access to various language models from different providers, such as Azure OpenAI and Google GenAI. It loads model configurations from a JSON file and allows you to retrieve specific models by their deployment name or a combination of provider and type.

### Key Features:
- **Centralized Model Management**: Manage all your model configurations in a single JSON file.
- **On-demand Model Loading**: Models are instantiated and loaded when requested.
- **Provider Agnostic**: Supports multiple model providers.
- **Flexible Retrieval**: Get models by a unique deployment name.

## 2. Initialization

To use the `ModelLoadBalancer`, you need to initialize it with the path to your model configuration file.

```python
from crewplus.services.model_load_balancer import ModelLoadBalancer

# Initialize the balancer with the path to your config file
config_path = "tests/models_config.json"  # Adjust the path as needed
balancer = ModelLoadBalancer(config_path=config_path)

# Load the configurations and instantiate the models
balancer.load_config()
```

## 3. Configuration File

The `ModelLoadBalancer` uses a JSON file to configure the available models. Here is an example of what the configuration file looks like. The `deployment_name` is used to retrieve a specific model.

```json
{
  "models": [
    {
      "id": 3,
      "provider": "azure-openai",
      "type": "inference",
      "deployment_name": "gpt-4.1",
      "api_version": "2025-01-01-preview",
      "api_base": "https://crewplus-eastus2.openai.azure.com",
      "api_key": "your-api-key"
    },
    {
      "id": 7,
      "provider": "google-genai",
      "type": "inference",
      "deployment_name": "gemini-2.5-flash",
      "api_key": "your-google-api-key"
    },
    {
      "id": 8,
      "provider": "google-genai",
      "type": "ingestion",
      "deployment_name": "gemini-2.5-pro",
      "api_key": "your-google-api-key"
    }
  ]
}
```

## 4. Getting a Model

You can retrieve a model instance using the `get_model` method and passing the `deployment_name`.

### Get `gemini-2.5-flash`
```python
gemini_flash_model = balancer.get_model(deployment_name="gemini-2.5-flash")

# Now you can use the model
# from langchain_core.messages import HumanMessage
# response = gemini_flash_model.invoke([HumanMessage(content="Hello!")])
# print(response.content)
```

### Get `gemini-2.5-pro`
```python
gemini_pro_model = balancer.get_model(deployment_name="gemini-2.5-pro")
```

### Get `gpt-4.1`
```python
gpt41_model = balancer.get_model(deployment_name="gpt-4.1")
```

### Get `o3mini`
The model `o3mini` is identified by the deployment name `gpt-o3mini-eastus2-RPM25`.
```python
o3mini_model = balancer.get_model(deployment_name="gpt-o3mini-eastus2-RPM25")
```
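The introduction also mentions retrieval by a combination of provider and type, which none of the examples above exercise. A speculative sketch of that call shape follows; both keyword names (`provider`, `model_type`) are assumptions about `get_model`, not a documented signature:

```python
# Speculative sketch: retrieval by provider and type, per the introduction.
# `provider` and `model_type` are assumed keyword names (model_type mirrors the
# get_embeddings example in VDBService.md); verify against the ModelLoadBalancer
# source before relying on this.
ingestion_model = balancer.get_model(provider="google-genai", model_type="ingestion")
```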
docs/VDBService.md
ADDED

# VDBService Documentation

## 1. Introduction

The `VDBService` is a centralized service class designed to manage connections to vector databases (Milvus and Zilliz) and handle the instantiation of embedding models. It simplifies interactions with your vector store by reading all necessary configurations from a single `settings` object.

### Key Features:
- **Centralized Configuration**: Manages database connections and embedding model settings from a single Python dictionary.
- **Provider-Agnostic Client**: Supports both Milvus and Zilliz as vector store providers.
- **Resilient Connection**: Includes a built-in retry mechanism when first connecting to the vector database.
- **Instance Caching**: Caches `Zilliz` vector store instances by collection name to prevent re-instantiation and improve performance (see the sketch after the Basic Usage example below).
- **Flexible Embedding Models**: Can retrieve embedding models from either the global `ModelLoadBalancer` or directly from the configuration settings.

## 2. Initialization

To use the `VDBService`, you must first prepare a `settings` dictionary containing the configuration for your vector store and embedding provider. You then pass this dictionary to the service's constructor.

If you plan to use embedding models from the global `ModelLoadBalancer`, you must initialize it first.

```python
from crewplus.vectorstores.milvus.vdb_service import VDBService
from crewplus.services.init_services import init_load_balancer

# 1. (Optional) Initialize the global model load balancer if you plan to use it.
# This should be done once when your application starts.
init_load_balancer(config_path="path/to/your/models_config.json")

# 2. Define the configuration for the VDBService
settings = {
    "embedder": {
        "provider": "azure-openai",
        "config": {
            "model": "text-embedding-3-small",
            "api_version": "2023-05-15",
            "api_key": "YOUR_AZURE_OPENAI_KEY",
            "openai_base_url": "YOUR_AZURE_OPENAI_ENDPOINT",
            "embedding_dims": 1536
        }
    },
    "vector_store": {
        "provider": "milvus",
        "config": {
            "host": "localhost",
            "port": 19530,
            "user": "root",
            "password": "password",
            "db_name": "default"
        }
    },
    "index_params": {
        "metric_type": "L2",
        "index_type": "AUTOINDEX",
        "params": {}
    }
}

# 3. Initialize the VDBService with the settings
vdb_service = VDBService(settings=settings)

print("VDBService initialized successfully!")
```

## 3. Usage Examples

### Basic Usage: Get Vector Store with Default Embeddings

This example shows how to get a vector store instance using the default embedding model specified in the `embedder` section of your settings.

```python
# Get a vector store instance for the "my_documents" collection
# This will use the "azure-openai" embedder from the settings by default.
vector_store = vdb_service.get_vector_store(collection_name="my_documents")

# You can now use the vector_store object to add or search for documents
# vector_store.add_texts(["some text to embed"])
print(f"Successfully retrieved vector store for collection: {vector_store.collection_name}")
```
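As promised in the Key Features list, here is a small sketch of the instance-caching behavior. The identity assertion encodes an assumption about the cache (that a repeated collection name returns the same object) rather than a documented guarantee:

```python
# Hedged sketch of instance caching: repeated lookups of the same collection
# should hit the cache instead of re-instantiating the Zilliz wrapper.
store_a = vdb_service.get_vector_store(collection_name="my_documents")
store_b = vdb_service.get_vector_store(collection_name="my_documents")
assert store_a is store_b  # assumption: the cache returns the identical instance
```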
### Advanced Usage: Using an Embedding Model from the Model Load Balancer

In some cases, you may want to use a specific embedding model managed by the central `ModelLoadBalancer`. This example demonstrates how to retrieve that model first and then pass it to `get_vector_store`.

This requires the `ModelLoadBalancer` to have been initialized, as shown in the Initialization section above.

```python
# 1. Get a specific embedding model from the ModelLoadBalancer
# The service will call get_model_balancer() internally to get the initialized instance.
embedding_model = vdb_service.get_embeddings(
    from_model_balancer=True,
    model_type="embedding-large"  # Specify the model type configured in the balancer
)

print(f"Retrieved embedding model from balancer: {embedding_model}")

# 2. Get a vector store instance using the specified embedding model
vector_store_from_balancer = vdb_service.get_vector_store(
    collection_name="balancer_collection",
    embeddings=embedding_model  # Pass the specific embedding model
)

print(f"Successfully retrieved vector store for collection: {vector_store_from_balancer.collection_name}")
```

### Getting the Raw Milvus Client

If you need to perform operations not exposed by the LangChain `Zilliz` wrapper, you can get direct access to the underlying `MilvusClient`.

```python
# Get the raw Milvus client to perform advanced operations
client = vdb_service.get_vector_client()

# For example, list all collections in the database
collections = client.list_collections()
print("Available collections:", collections)
```

### Adding and Deleting Documents by Source

This example shows a common workflow: adding documents with a specific `source` to a collection, and then using `delete_old_indexes` to remove them based on that source.

**Note:** The `delete_old_indexes` method in this example filters on the `source` metadata field. Ensure your implementation matches the field you intend to use for filtering.

```python
from langchain_core.documents import Document
import time

# 1. Get the vector store instance
collection_name = "test_collection_for_delete"
vector_store = vdb_service.get_vector_store(collection_name=collection_name)

# 2. Prepare documents with 'source' in their metadata.
# The delete function looks for this specific metadata field.
docs_to_add = [
    Document(
        page_content="This is a test document about CrewPlus AI.",
        metadata={"source": "http://example.com/crewplus-docs"}
    ),
    Document(
        page_content="This is another test document, about LangChain.",
        metadata={"source": "http://example.com/langchain-docs"}  # Different source
    )
]

# 3. Add the documents to the collection
ids = vector_store.add_documents(docs_to_add)
print(f"Added {len(ids)} documents to collection '{collection_name}'.")

# In a real application, you might need a short delay for indexing to complete.
time.sleep(2)

# 4. Verify the documents were added
results = vector_store.similarity_search("CrewPlus", k=2)
print(f"Found {len(results)} related documents before deletion.")
assert len(results) > 0

# 5. Delete the documents using the same source
source_to_delete = "http://example.com/crewplus-docs"
vdb_service.delete_old_indexes(url=source_to_delete, vdb=vector_store)
print(f"Called delete_old_indexes for source: {source_to_delete}")

# Allow time for the deletion to be processed.
time.sleep(2)

# 6. Verify the documents were deleted. Similarity search still returns the
# nearest neighbors that remain (the langchain-docs document), so check that
# nothing from the deleted source comes back rather than expecting zero hits.
results_after_delete = vector_store.similarity_search("CrewPlus", k=2)
print(f"Found {len(results_after_delete)} related documents after deletion.")
assert all(doc.metadata.get("source") != source_to_delete for doc in results_after_delete)

# 7. Clean up by dropping the collection
vdb_service.drop_collection(collection_name=collection_name)
print(f"Dropped collection '{collection_name}'.")
```

### Adding and Deleting Documents by Source ID

This example shows how to add documents with a `source_id` and then use `delete_old_indexes_by_id` to remove them.

```python
from langchain_core.documents import Document
import time

# 1. Get the vector store instance
collection_name = "test_collection_for_id_delete"
vector_store_for_id = vdb_service.get_vector_store(collection_name=collection_name)

# 2. Prepare documents with 'source_id' in their metadata.
docs_with_id = [
    Document(
        page_content="Document for agent A.",
        metadata={"source_id": "agent-a-123"}
    ),
    Document(
        page_content="Another document for agent A.",
        metadata={"source_id": "agent-a-123"}
    )
]

# 3. Add the documents to the collection
ids = vector_store_for_id.add_documents(docs_with_id)
print(f"Added {len(ids)} documents to collection '{collection_name}'.")

time.sleep(2)

# 4. Verify the documents were added
results = vector_store_for_id.similarity_search("agent A", k=2)
print(f"Found {len(results)} related documents before deletion.")
assert len(results) == 2

# 5. Delete the documents using the source_id
id_to_delete = "agent-a-123"
vdb_service.delete_old_indexes_by_id(source_id=id_to_delete, vdb=vector_store_for_id)
print(f"Called delete_old_indexes_by_id for source_id: {id_to_delete}")

time.sleep(2)

# 6. Verify the documents were deleted
results_after_delete = vector_store_for_id.similarity_search("agent A", k=2)
print(f"Found {len(results_after_delete)} related documents after deletion.")
assert len(results_after_delete) == 0

# 7. Clean up by dropping the collection
vdb_service.drop_collection(collection_name=collection_name)
print(f"Dropped collection '{collection_name}'.")
```
docs/index.md
ADDED

# Welcome to CrewPlus

**CrewPlus** provides the foundational services and core components for building advanced AI applications. It is the heart of the CrewPlus ecosystem, designed for scalability, extensibility, and seamless integration.

## Overview

This repository, `crewplus-base`, contains the core `crewplus` Python package. It includes essential building blocks for interacting with large language models, managing vector databases, and handling application configuration. Whether you are building a simple chatbot or a complex multi-agent system, CrewPlus offers the robust foundation you need.

## The CrewPlus Ecosystem

CrewPlus is designed as a modular and extensible ecosystem of packages. This allows you to adopt only the components you need for your specific use case.

- **`crewplus` (This package):** The core package containing foundational services for chat, model load balancing, and vector stores.
- **`crewplus-agents`:** An extension for creating and managing autonomous AI agents.
- **`crewplus-ingestion`:** Provides robust pipelines for knowledge ingestion and data processing.
- **`crewplus-integrations`:** A collection of third-party integrations to connect CrewPlus with other services and platforms.

## Getting Started

To get started, check out our detailed user guides:

- **[GeminiChatModel Guide](./GeminiChatModel.md)**: A comprehensive guide to using the `GeminiChatModel` for text, image, and video understanding.
- **[ModelLoadBalancer Guide](./ModelLoadBalancer.md)**: A guide to using the `ModelLoadBalancer` for managing and accessing different language models.
{crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/WHEEL: file without changes
{crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/entry_points.txt: file without changes
{crewplus-0.2.2.dist-info → crewplus-0.2.4.dist-info}/licenses/LICENSE: file without changes