crewplus 0.2.10__tar.gz → 0.2.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crewplus might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: crewplus
-Version: 0.2.10
+Version: 0.2.11
 Summary: Base services for CrewPlus AI applications
 Author-Email: Tim Liu <tim@opsmateai.com>
 License: MIT
@@ -45,7 +45,7 @@ CrewPlus is designed as a modular and extensible ecosystem of packages. This all
 
 - **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
 - **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
-- **Vector DB Services:** Abstractions for working with popular vector stores for retrieval-augmented generation (RAG).
+- **Vector DB Services:** Abstractions for working with popular vector stores (e.g. Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
 
 
 ## Documentation
@@ -114,6 +114,12 @@ crewplus-base/ # GitHub repo name
 ```
 
 ## Deploy to PyPI
+
+Clean Previous Build Artifacts:
+Remove the dist/, build/, and *.egg-info/ directories to ensure that no old files are included in the new build.
+
+rm -rf dist build *.egg-info
+
 # install deployment tool
 pip install twine
 
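The clean step added above assumes a Unix shell. For anyone following along on Windows, a cross-platform equivalent can be sketched in Python; this helper is illustrative only and not part of the package:

```python
# clean_build.py -- illustrative helper, not shipped with crewplus
import pathlib
import shutil

def clean_build_artifacts(root: str = ".") -> None:
    """Remove dist/, build/, and *.egg-info/ so no stale files reach the new build."""
    base = pathlib.Path(root)
    targets = [base / "dist", base / "build", *base.glob("*.egg-info")]
    for target in targets:
        shutil.rmtree(target, ignore_errors=True)  # no-op if the directory is absent

if __name__ == "__main__":
    clean_build_artifacts()
```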
@@ -25,7 +25,7 @@ CrewPlus is designed as a modular and extensible ecosystem of packages. This all
 
 - **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
 - **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
-- **Vector DB Services:** Abstractions for working with popular vector stores for retrieval-augmented generation (RAG).
+- **Vector DB Services:** Abstractions for working with popular vector stores (e.g. Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
 
 
 ## Documentation
@@ -94,6 +94,12 @@ crewplus-base/ # GitHub repo name
 ```
 
 ## Deploy to PyPI
+
+Clean Previous Build Artifacts:
+Remove the dist/, build/, and *.egg-info/ directories to ensure that no old files are included in the new build.
+
+rm -rf dist build *.egg-info
+
 # install deployment tool
 pip install twine
 
@@ -0,0 +1,20 @@
+import os
+from .model_load_balancer import ModelLoadBalancer
+
+model_balancer = None
+
+def init_load_balancer(config_path: str = None):
+    global model_balancer
+    if model_balancer is None:
+        # Use parameter if provided, otherwise check env var, then default
+        final_config_path = config_path or os.getenv(
+            "MODEL_CONFIG_PATH",
+            "config/models_config.json"  # Fixed default path
+        )
+        model_balancer = ModelLoadBalancer(final_config_path)
+        model_balancer.load_config()  # Load initial configuration synchronously
+
+def get_model_balancer() -> ModelLoadBalancer:
+    if model_balancer is None:
+        raise RuntimeError("ModelLoadBalancer not initialized")
+    return model_balancer
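The rewritten initializer above now accepts an explicit `config_path`, falling back to the `MODEL_CONFIG_PATH` environment variable and then to `config/models_config.json`. A minimal usage sketch (the config path shown is a placeholder):

```python
import os

from crewplus.services.init_services import get_model_balancer, init_load_balancer

# Optional: point the singleton at a non-default config before initializing.
os.environ["MODEL_CONFIG_PATH"] = "tests/models_config.json"  # placeholder path

init_load_balancer()             # later calls are no-ops once the singleton exists
balancer = get_model_balancer()  # raises RuntimeError if init was never called

# An explicit argument takes precedence over the environment variable:
# init_load_balancer(config_path="path/to/your/models_config.json")
```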
@@ -141,6 +141,7 @@ class ModelLoadBalancer:
             return ChatOpenAI(**kwargs)
         elif provider == 'azure-openai-embeddings':
             return AzureOpenAIEmbeddings(
+                model=model_config['model_name'],
                 azure_deployment=model_config['deployment_name'],
                 openai_api_version=model_config['api_version'],
                 api_key=model_config['api_key'],
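The one-line fix above makes the balancer pass the configured model name through to `AzureOpenAIEmbeddings` instead of relying on the deployment name alone. A standalone sketch of the resulting constructor call; all literal values are placeholders, and `azure_endpoint` is added here only to make the example self-contained:

```python
from langchain_openai import AzureOpenAIEmbeddings

# Mirrors the call the balancer now builds from its JSON config entry.
embeddings = AzureOpenAIEmbeddings(
    model="text-embedding-3-large",              # new: explicit model name
    azure_deployment="embedding-large-eastus2",  # placeholder deployment
    openai_api_version="2024-02-01",
    api_key="YOUR_AZURE_OPENAI_KEY",
    azure_endpoint="https://YOUR-RESOURCE.openai.azure.com",
)
```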
@@ -5,9 +5,34 @@ import json
 from pymilvus import DataType
 from langchain_milvus import Milvus
 from langchain_core.documents import Document
-from ...utils import SchemaDocumentUpdater, Action
+from ...utils.schema_document_updater import SchemaDocumentUpdater
+from ...utils.schema_action import Action
 from .milvus_schema_manager import MilvusSchemaManager
 
+DEFAULT_SCHEMA = """
+{
+    "node_types": {
+        "Document": {
+            "properties": {
+                "pk": {
+                    "type": "INT64",
+                    "is_primary": true,
+                    "auto_id": true
+                },
+                "vector": {
+                    "type": "FLOAT_VECTOR",
+                    "dim": 1536
+                },
+                "text": {
+                    "type": "VARCHAR",
+                    "max_length": 65535,
+                    "description": "The core text of the memory. This could be a user query, a documented fact, a procedural step, or a log of an event."
+                }
+            }
+        }
+    }
+}
+"""
 
 class SchemaMilvus(Milvus):
     """
@@ -2,7 +2,7 @@
 # @Author: Cursor
 # @Date: 2025-02-12
 # @Last Modified by: Gemini
-# @Last Modified time: 2025-07-01
+# @Last Modified time: 2025-07-04
 
 import logging
 from typing import List, Dict, Union, Optional
@@ -11,8 +11,8 @@ from langchain_core.embeddings import Embeddings
 from langchain_openai import AzureOpenAIEmbeddings
 from pymilvus import MilvusClient
 
-from crewplus.services.init_services import get_model_balancer
-from crewplus.vectorstores.milvus.schema_milvus import SchemaMilvus
+from ...services.init_services import get_model_balancer
+from .schema_milvus import SchemaMilvus, DEFAULT_SCHEMA
 
 class VDBService(object):
     """
@@ -22,17 +22,22 @@ class VDBService(object):
     and provides helper methods to get embedding functions and vector store instances.
 
     Args:
-        settings (dict): A dictionary containing configuration for the vector store
+        settings (dict, optional): A dictionary containing configuration for the vector store
             and embedding models.
+        endpoint (str, optional): The URI for the Zilliz cluster. Can be used for simple
+            initialization instead of `settings`.
+        token (str, optional): The token for authenticating with Zilliz. Must be provided
+            with `endpoint`.
         schema (str, optional): The schema definition for a collection. Defaults to None.
         logger (logging.Logger, optional): An optional logger instance. Defaults to None.
 
     Raises:
-        ValueError: If required configurations are missing from the settings dictionary.
+        ValueError: If required configurations are missing.
         NotImplementedError: If an unsupported provider is specified.
         RuntimeError: If the MilvusClient fails to initialize after a retry.
 
     Example:
+        >>> # Initialize with a full settings dictionary
         >>> settings = {
         ...     "embedder": {
         ...         "provider": "azure-openai",
@@ -61,6 +66,10 @@ class VDBService(object):
         ...     }
         ... }
         >>> vdb_service = VDBService(settings=settings)
+        >>>
+        >>> # Alternatively, initialize with an endpoint and token for Zilliz
+        >>> # vdb_service_zilliz = VDBService(endpoint="YOUR_ZILLIZ_ENDPOINT", token="YOUR_ZILLIZ_TOKEN")
+        >>>
         >>> # Get the raw Milvus client
         >>> client = vdb_service.get_vector_client()
         >>> print(client.list_collections())
@@ -82,17 +91,41 @@ class VDBService(object):
     connection_args: dict
     settings: dict
 
-    def __init__(self, settings: dict, schema: str = None, logger: logging.Logger = None):
+    def __init__(self, settings: dict = None, endpoint: str = None, token: str = None, schema: str = None, logger: logging.Logger = None):
         """
-        Initializes the VDBService.
+        Initializes the VDBService.
+
+        Can be initialized in two ways:
+        1. By providing a full `settings` dictionary for complex configurations.
+        2. By providing `endpoint` and `token` for a direct Zilliz connection.
+           Note: When using this method, an `embedder` configuration is not created.
+           You must either use the `ModelLoadBalancer` or pass an `Embeddings` object
+           directly to methods like `get_vector_store`.
 
         Args:
-            settings (dict): Configuration dictionary for the service.
+            settings (dict, optional): Configuration dictionary for the service. Defaults to None.
+            endpoint (str, optional): The URI for the Zilliz cluster. Used if `settings` is not provided.
+            token (str, optional): The token for authenticating with the Zilliz cluster.
             schema (str, optional): Default schema for new collections. Defaults to None.
             logger (logging.Logger, optional): Logger instance. Defaults to None.
         """
         self.logger = logger or logging.getLogger(__name__)
-        self.settings = settings
+
+        if settings:
+            self.settings = settings
+        elif endpoint and token:
+            self.logger.info("Initializing VDBService with endpoint and token for a Zilliz connection.")
+            self.settings = {
+                "vector_store": {
+                    "provider": "zilliz",
+                    "config": {
+                        "uri": endpoint,
+                        "token": token
+                    }
+                }
+            }
+        else:
+            raise ValueError("VDBService must be initialized with either a 'settings' dictionary or both 'endpoint' and 'token'.")
 
         vector_store_settings = self.settings.get("vector_store")
         if not vector_store_settings:
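The constructor now supports two initialization paths; the shorthand builds the same `settings` structure internally, as a small sketch shows (credentials are placeholders):

```python
from crewplus.vectorstores.milvus.vdb_service import VDBService

# Path 1: explicit settings dictionary.
settings = {
    "vector_store": {
        "provider": "zilliz",
        "config": {"uri": "YOUR_ZILLIZ_ENDPOINT", "token": "YOUR_ZILLIZ_TOKEN"},
    }
}
vdb_from_settings = VDBService(settings=settings)

# Path 2: new shorthand that builds the equivalent settings internally.
# Note: no embedder section is created, so pass an Embeddings object to
# get_vector_store() or route through the ModelLoadBalancer.
vdb_shorthand = VDBService(endpoint="YOUR_ZILLIZ_ENDPOINT", token="YOUR_ZILLIZ_TOKEN")

# Supplying neither settings nor endpoint/token raises ValueError.
```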
@@ -164,7 +197,7 @@ class VDBService(object):
         """
         return self._client
 
-    def get_embeddings(self, from_model_balancer: bool = False, model_type: Optional[str] = "embedding-large") -> Embeddings:
+    def get_embeddings(self, from_model_balancer: bool = False, provider: Optional[str] = "azure-openai", model_type: Optional[str] = "embedding-large") -> Embeddings:
         """
         Gets an embedding function, either from the model balancer or directly from settings.
 
@@ -178,7 +211,7 @@ class VDBService(object):
         """
         if from_model_balancer:
             model_balancer = get_model_balancer()
-            return model_balancer.get_model(model_type=model_type)
+            return model_balancer.get_model(provider=provider, model_type=model_type)
 
         embedder_config = self.settings.get("embedder")
         if not embedder_config:
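With the new `provider` argument forwarded to the balancer, callers can disambiguate between providers that share a `model_type`. A usage sketch, assuming an initialized `vdb_service` and balancer:

```python
# Both keyword arguments are forwarded verbatim to ModelLoadBalancer.get_model().
embedding_model = vdb_service.get_embeddings(
    from_model_balancer=True,
    provider="azure-openai-embeddings",
    model_type="embedding-large",
)
```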
@@ -212,11 +245,41 @@ class VDBService(object):
             self.logger.error(f"Unsupported embedding provider: {provider}")
             raise NotImplementedError(f"Embedding provider '{provider}' is not supported yet.")
 
+    def _ensure_collection_exists(self, collection_name: str, embeddings: Embeddings):
+        """
+        Checks if a collection exists and creates it if it doesn't.
+        This operation is wrapped in a try-except block to handle potential failures
+        during collection creation.
+        """
+        try:
+            client = self.get_vector_client()
+            if not client.has_collection(collection_name):
+                self.logger.info(f"Collection '{collection_name}' does not exist. Creating it.")
+
+                schema_milvus = SchemaMilvus(
+                    embedding_function=embeddings,
+                    collection_name=collection_name,
+                    connection_args=self.connection_args,
+                    index_params=self.index_params
+                )
+
+                schema_to_use = self.schema or DEFAULT_SCHEMA
+                if not self.schema:
+                    self.logger.warning(f"No schema provided for VDBService. Using DEFAULT_SCHEMA for collection '{collection_name}'.")
+
+                schema_milvus.set_schema(schema_to_use)
+
+                if not schema_milvus.create_collection():
+                    raise RuntimeError(f"SchemaMilvus failed to create collection '{collection_name}'.")
+        except Exception as e:
+            self.logger.error(f"An error occurred while ensuring collection '{collection_name}' exists: {e}")
+            raise RuntimeError(f"Failed to ensure collection '{collection_name}' exists.") from e
+
     def get_vector_store(self, collection_name: str, embeddings: Embeddings = None, metric_type: str = "L2") -> Zilliz:
         """
         Gets a vector store instance, creating it if it doesn't exist for the collection.
-
-        This method caches instances by collection name to avoid re-instantiation.
+        This method validates both the embedding function and the vector store connection
+        before caching the instance to prevent faulty instances from being reused.
 
         Args:
             collection_name (str): The name of the collection in the vector database.
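The new `_ensure_collection_exists` helper applies a check-then-create guard before any store is handed out. The same guard, reduced to the raw `pymilvus` client for illustration (the connection URI is a placeholder, and the quick-setup `create_collection` call stands in for the `SchemaMilvus` path the service actually uses):

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # placeholder connection

collection = "agent_memory"
if not client.has_collection(collection):
    # Dimension must match the embedding model (DEFAULT_SCHEMA uses 1536).
    client.create_collection(collection_name=collection, dimension=1536)
```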
@@ -239,6 +302,22 @@ class VDBService(object):
         if embeddings is None:
             embeddings = self.get_embeddings()
 
+        # Ensure the collection exists before proceeding.
+        self._ensure_collection_exists(collection_name, embeddings)
+
+        # 1. Validate the embedding function before proceeding.
+        try:
+            self.logger.debug(f"Testing embedding function for collection '{collection_name}'...")
+            embeddings.embed_query("validation_test_string")
+            self.logger.debug("Embedding function is valid.")
+        except Exception as e:
+            self.logger.error(
+                f"The provided embedding function is invalid and failed with error: {e}. "
+                f"Cannot create a vector store for collection '{collection_name}'."
+            )
+            raise RuntimeError(f"Invalid embedding function provided.") from e
+
+        # If embeddings are valid, proceed to create the Zilliz instance.
         index_params = self.index_params or {
             "metric_type": metric_type,
             "index_type": "AUTOINDEX",
@@ -257,43 +336,48 @@ class VDBService(object):
 
         return vdb
 
-    def delete_old_indexes(self, url: str = None, vdb: Zilliz = None) -> None:
+    def delete_old_indexes(self, url: str = None, vdb: Zilliz = None) -> (bool | None):
         """ Delete old indexes of the same source_url
 
         Args:
             url (str): source url
+            vdb (Zilliz): Zilliz instance
         """
+        self.logger.info(f"Delete old indexes of the same source_url:{url}")
+
         if url is None or vdb is None:
-            return
+            return None
 
         # Delete indexes of the same source_url
-        expr = "source in [\"" + url + "\"]"
+        expr = f'source_url == "{url}" or source == "{url}"'
         pks = vdb.get_pks(expr)
 
         # Delete entities by pks
         if pks is not None and len(pks) > 0 :
-            old_items = vdb.delete(pks)
-            self.logger.info("ingesting document -- delete old indexes -- " + str(old_items))
+            res = vdb.delete(pks)
+            self.logger.info("Deleted old indexes result: " + str(res))
+            return res
 
-    def delete_old_indexes_by_id(self, id: str = None, vdb: Zilliz = None) -> None:
+    def delete_old_indexes_by_id(self, source_id: str = None, vdb: Zilliz = None) -> (bool | None):
         """ Delete old indexes of the same source_id
 
         Args:
-            id (str): source id
+            source_id (str): source id
         """
-        self.logger.info(f"Delete old indexes of the same source_id:{id}")
+        self.logger.info(f"Delete old indexes of the same source_id:{source_id}")
 
-        if id is None or vdb is None:
-            return
+        if source_id is None or vdb is None:
+            return None
 
         # Delete indexes of the same source_id
-        expr = "source_id in [\"" + id + "\"]"
+        expr = f'source_id == "{source_id}"'
         pks = vdb.get_pks(expr)
 
         # Delete entities by pks
         if pks is not None and len(pks) > 0 :
-            old_items = vdb.delete(pks)
-            self.logger.info("ingesting document -- delete old indexes -- " + str(old_items))
+            res = vdb.delete(pks)
+            self.logger.info("Deleted old indexes result: " + str(res))
+            return res
 
     def drop_collection(self, collection_name: str) -> None:
         """
@@ -326,12 +410,13 @@ class VDBService(object):
             self.logger.info(f"Removed '{collection_name}' from instance cache.")
 
     def delete_data_by_filter(self, collection_name: str = None, filter: str = None) -> None:
-        """ Delete a collection
+        """ Delete data by filter
 
         Args:
-            collection_name (str): scollection_name
+            collection_name (str): collection_name
+            filter (str): filter
         """
-        self.logger.info(f"drop a collection by name:{collection_name}")
+        self.logger.info(f"Delete data by filter:{filter}")
 
         try:
             client=self.get_vector_client()
@@ -0,0 +1,226 @@
+# GeminiChatModel Documentation
+
+## 1. Introduction
+
+The `GeminiChatModel` is a custom LangChain-compatible chat model that provides a robust interface to Google's Gemini Pro and Flash models. It is designed to handle multimodal inputs, including text, images, and videos, making it a versatile tool for building advanced AI applications.
+
+### Key Features:
+- **LangChain Compatibility**: Seamlessly integrates into the LangChain ecosystem as a `BaseChatModel`.
+- **Multimodal Support**: Natively processes text, images (from URLs, local paths, or base64), and videos (from local paths, Google Cloud URIs, or raw bytes).
+- **Streaming**: Supports streaming for both standard and multimodal responses.
+- **Advanced Configuration**: Allows fine-tuning of generation parameters like temperature, top-p, top-k, and max tokens.
+- **Video Segment Analysis**: Can process specific time ranges within a video using start and end offsets.
+
+## 2. Installation
+
+To use the `GeminiChatModel`, install the `crewplus` package:
+
+```bash
+pip install crewplus
+```
+
+## 3. Initialization
+
+First, ensure you have set your Google API key as an environment variable:
+
+```bash
+# For Linux/macOS
+export GOOGLE_API_KEY="YOUR_API_KEY"
+
+# For Windows PowerShell
+$env:GOOGLE_API_KEY = "YOUR_API_KEY"
+```
+
+Then, you can import and initialize the model in your Python code.
+
+```python
+import logging
+from crewplus.services import GeminiChatModel
+from langchain_core.messages import HumanMessage
+
+# Optional: Configure a logger for detailed output
+logging.basicConfig(level=logging.INFO)
+test_logger = logging.getLogger(__name__)
+
+# Initialize the model
+# You can also pass the google_api_key directly as a parameter
+model = GeminiChatModel(
+    model_name="gemini-2.5-flash",  # Or "gemini-1.5-pro"
+    logger=test_logger,
+    temperature=0.0,
+)
+```
+
+## 4. Basic Usage (Text-only)
+
+The model can be used for simple text-based conversations using `.invoke()` or `.stream()`.
+
+```python
+# Using invoke for a single response
+response = model.invoke("Hello, how are you?")
+print(response.content)
+
+# Using stream for a chunked response
+print("\n--- Streaming Response ---")
+for chunk in model.stream("Tell me a short story."):
+    print(chunk.content, end="", flush=True)
+```
+
+## 5. Image Understanding
+
+`GeminiChatModel` can understand images provided via a URL or as base64 encoded data.
+
+### Example 1: Image from a URL
+
+You can provide a direct URL to an image.
+
+```python
+from langchain_core.messages import HumanMessage
+
+url_message = HumanMessage(
+    content=[
+        {"type": "text", "text": "Describe this image:"},
+        {
+            "type": "image_url",
+            "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+        },
+    ]
+)
+url_response = model.invoke([url_message])
+print("Image response (URL):", url_response.content)
+```
+> **Sample Output:**
+> The image shows a wooden boardwalk stretching into the distance through a field of tall, green grass... The overall impression is one of tranquility and natural beauty.
+
+### Example 2: Local Image (Base64)
+
+You can also send a local image file by encoding it in base64.
+
+```python
+import base64
+from langchain_core.messages import HumanMessage
+
+image_path = "./notebooks/test_image_202506191.jpg"
+try:
+    with open(image_path, "rb") as image_file:
+        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
+
+    image_message = HumanMessage(
+        content=[
+            {"type": "text", "text": "Describe this photo and its background story."},
+            {
+                "type": "image_url",
+                "image_url": {
+                    "url": f"data:image/jpeg;base64,{encoded_string}"
+                }
+            },
+        ]
+    )
+    image_response = model.invoke([image_message])
+    print("Image response (base64):", image_response.content)
+except FileNotFoundError:
+    print(f"Image file not found at {image_path}, skipping base64 example.")
+```
+> **Sample Output:**
+> This image is a movie still from the 2017 Japanese thriller "22 Year Old's Confession: I am the Murderer"... The four women in the photo are the victims of a serial killer...
+
+## 6. Video Understanding
+
+The model supports video analysis from uploaded files, URIs, and raw bytes.
+
+**Important Note:** The Gemini API does **not** support common public video URLs (e.g., YouTube, Loom, or public MP4 links). Videos must be uploaded to Google's servers first to get a processable URI.
+
+### Example 1: Large Video File (>20MB)
+
+For large videos, you must first upload the file using the `google-genai` client to get a file object.
+
+```python
+from google import genai
+import os
+from langchain_core.messages import HumanMessage
+
+# Initialize the Google GenAI client
+client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])
+
+# Upload the video file
+video_path = "./notebooks/manufacturing_process_tutorial.mp4"
+print("Uploading video... this may take a moment.")
+video_file_obj = client.files.upload(file=video_path)
+print(f"Video uploaded successfully. File name: {video_file_obj.name}")
+
+# Use the uploaded file object in the prompt
+video_message = HumanMessage(
+    content=[
+        {"type": "text", "text": "Summarize this video and provide timestamps for key events."},
+        {"type": "video_file", "file": video_file_obj},
+    ]
+)
+video_response = model.invoke([video_message])
+print("Video response:", video_response.content)
+```
+
+> **Sample Output:**
+> This video provides a step-by-step guide on how to correct a mis-set sidewall during tire manufacturing...
+> **Timestamps:**
+> * **0:04:** Applying product package to some material
+> * **0:12:** Splice product Together and Prepare some material
+> ...
+
+### Example 2: Video with Time Offsets
+
+You can analyze just a specific portion of a video by providing a `start_offset` and `end_offset`. This works with video URIs obtained after uploading.
+
+```python
+# Assuming 'video_file_obj' is available from the previous step
+video_uri = video_file_obj.uri
+
+offset_message = HumanMessage(
+    content=[
+        {"type": "text", "text": "Transcribe the events in this video segment."},
+        {
+            "type": "video_file",
+            "url": video_uri,
+            "start_offset": "5s",
+            "end_offset": "30s"
+        }
+    ]
+)
+
+print("Streaming response for video segment:")
+for chunk in model.stream([offset_message]):
+    print(chunk.content, end="", flush=True)
+```
+> **Sample Output:**
+> This video demonstrates the process of applying Component A/Component B material to an assembly drum in a manufacturing setting...
+> **Transcription:**
+> **0:05 - 0:12:** A worker is shown applying a material...
+> **0:12 - 0:23:** The worker continues to prepare the material on the drum...
+
+### Example 3: Small Video File (<20MB)
+
+For small videos, you can pass the raw bytes directly without a separate upload step.
+
+```python
+from langchain_core.messages import HumanMessage
+
+try:
+    with open("./notebooks/product_demo_v1.mp4", "rb") as video_file:
+        video_bytes = video_file.read()
+
+    video_message = HumanMessage(
+        content=[
+            {"type": "text", "text": "What is happening in this video?"},
+            {
+                "type": "video_file",
+                "data": video_bytes,
+                "mime_type": "video/mp4"  # Mime type is required for raw data
+            },
+        ]
+    )
+    video_response = model.invoke([video_message])
+    print("Video response (bytes):", video_response.content)
+except FileNotFoundError:
+    print("Video file not found.")
+except Exception as e:
+    print(f"Video processing with bytes failed: {e}")
+```
@@ -0,0 +1,134 @@
+# ModelLoadBalancer Documentation
+
+## 1. Introduction
+
+The `ModelLoadBalancer` is a utility class designed to manage and provide access to various language models from different providers, such as Azure OpenAI and Google GenAI. It loads model configurations from a JSON file and allows you to retrieve specific models by their deployment name or a combination of provider and type.
+
+### Key Features:
+- **Centralized Model Management**: Manage all your model configurations in a single JSON file.
+- **On-demand Model Loading**: Models are instantiated and loaded when requested.
+- **Provider Agnostic**: Supports multiple model providers.
+- **Flexible Retrieval**: Get models by a unique deployment name.
+
+## 2. Initialization
+
+To use the `ModelLoadBalancer`, you need to initialize it with the path to your model configuration file.
+
+```python
+from crewplus.services.model_load_balancer import ModelLoadBalancer
+
+# Initialize the balancer with the path to your config file
+config_path = "tests/models_config.json"  # Adjust the path as needed
+balancer = ModelLoadBalancer(config_path=config_path)
+
+# Load the configurations and instantiate the models
+balancer.load_config()
+```
+
+## 3. Configuration File
+
+The `ModelLoadBalancer` uses a JSON file to configure the available models. Here is an example of what the configuration file looks like. The `deployment_name` is used to retrieve a specific model.
+
+```json
+{
+    "models": [
+        {
+            "id": 3,
+            "provider": "azure-openai",
+            "type": "inference",
+            "deployment_name": "gpt-4.1",
+            "api_version": "2025-01-01-preview",
+            "api_base": "https://crewplus-eastus2.openai.azure.com",
+            "api_key": "your-api-key"
+        },
+        {
+            "id": 7,
+            "provider": "google-genai",
+            "type": "inference",
+            "deployment_name": "gemini-2.5-flash",
+            "api_key": "your-google-api-key"
+        },
+        {
+            "id": 8,
+            "provider": "google-genai",
+            "type": "ingestion",
+            "deployment_name": "gemini-2.5-pro",
+            "api_key": "your-google-api-key"
+        }
+    ]
+}
+```
+
+## 4. Getting a Model
+
+You can retrieve a model instance using the `get_model` method and passing the `deployment_name`.
+
+### Get `gemini-2.5-flash`
+```python
+gemini_flash_model = balancer.get_model(deployment_name="gemini-2.5-flash")
+
+# Now you can use the model
+# from langchain_core.messages import HumanMessage
+# response = gemini_flash_model.invoke([HumanMessage(content="Hello!")])
+# print(response.content)
+```
+
+### Get `gemini-2.5-pro`
+```python
+gemini_pro_model = balancer.get_model(deployment_name="gemini-2.5-pro")
+```
+
+### Get `gpt-4.1`
+```python
+gpt41_model = balancer.get_model(deployment_name="gpt-4.1")
+```
+
+### Get `o3mini`
+The model `o3mini` is identified by the deployment name `gpt-o3mini-eastus2-RPM25`.
+```python
+o3mini_model = balancer.get_model(deployment_name="gpt-o3mini-eastus2-RPM25")
+```
+
+## 5. Global Access with `init_load_balancer`
+
+The `init_load_balancer` function provides a convenient singleton pattern for accessing the `ModelLoadBalancer` throughout your application without passing the instance around.
+
+First, you initialize the balancer once at the start of your application.
+
+### Initialization
+
+You can initialize it in several ways:
+
+**1. Default Initialization**
+
+This will look for the `MODEL_CONFIG_PATH` environment variable, or use the default path `config/models_config.json`.
+
+```python
+from crewplus.services.init_services import init_load_balancer
+
+init_load_balancer()
+```
+
+**2. Initialization with a Custom Path**
+
+You can also provide a direct path to your configuration file.
+
+```python
+from crewplus.services.init_services import init_load_balancer
+
+init_load_balancer(config_path="path/to/your/models_config.json")
+```
+
+### Getting the Balancer and Models
+
+Once initialized, you can retrieve the `ModelLoadBalancer` instance from anywhere in your code using `get_model_balancer`.
+
+```python
+from crewplus.services.init_services import get_model_balancer
+
+# Get the balancer instance
+balancer = get_model_balancer()
+
+# Get a model by deployment name
+gemini_flash_model = balancer.get_model(deployment_name="gemini-2.5-flash")
+```
@@ -0,0 +1,238 @@
+# VDBService Documentation
+
+## 1. Introduction
+
+The `VDBService` is a centralized service class designed to manage connections to vector databases (Milvus and Zilliz) and handle the instantiation of embedding models. It simplifies interactions with your vector store by reading all necessary configurations from a single `settings` object.
+
+### Key Features:
+- **Centralized Configuration**: Manages database connections and embedding model settings from a single Python dictionary.
+- **Provider-Agnostic Client**: Supports both Milvus and Zilliz as vector store providers.
+- **Resilient Connection**: Includes a built-in retry mechanism when first connecting to the vector database.
+- **Instance Caching**: Caches `Zilliz` vector store instances by collection name to prevent re-instantiation and improve performance.
+- **Flexible Embedding Models**: Can retrieve embedding models from either the global `ModelLoadBalancer` or directly from the configuration settings.
+
+## 2. Initialization
+
+To use the `VDBService`, you must first prepare a `settings` dictionary containing the configuration for your vector store and embedding provider. You then pass this dictionary to the service's constructor.
+
+If you plan to use embedding models from the global `ModelLoadBalancer`, you must initialize it first.
+
+```python
+from crewplus.vectorstores.milvus.vdb_service import VDBService
+from crewplus.services.init_services import init_load_balancer
+
+# 1. (Optional) Initialize the global model load balancer if you plan to use it.
+#    This should be done once when your application starts.
+init_load_balancer(config_path="path/to/your/models_config.json")
+
+# 2. Define the configuration for the VDBService
+settings = {
+    "embedder": {
+        "provider": "azure-openai",
+        "config": {
+            "model": "text-embedding-3-small",
+            "api_version": "2023-05-15",
+            "api_key": "YOUR_AZURE_OPENAI_KEY",
+            "openai_base_url": "YOUR_AZURE_OPENAI_ENDPOINT",
+            "embedding_dims": 1536
+        }
+    },
+    "vector_store": {
+        "provider": "milvus",
+        "config": {
+            "host": "localhost",
+            "port": 19530,
+            "user": "root",
+            "password": "password",
+            "db_name": "default"
+        }
+    },
+    "index_params": {
+        "metric_type": "L2",
+        "index_type": "AUTOINDEX",
+        "params": {}
+    }
+}
+
+# 3. Initialize the VDBService with the settings
+vdb_service = VDBService(settings=settings)
+
+print("VDBService initialized successfully!")
+```
+
+**Alternative Initialization for Zilliz**
+
+For a simpler Zilliz Cloud connection, you can initialize the service directly with your endpoint and token.
+
+```python
+# Initialize directly with Zilliz credentials
+vdb_service_zilliz = VDBService(
+    endpoint="YOUR_ZILLIZ_ENDPOINT",
+    token="YOUR_ZILLIZ_TOKEN"
+)
+
+print("VDBService for Zilliz initialized successfully!")
+```
+
+## 3. Usage Examples
+
+### Basic Usage: Get Vector Store with Default Embeddings
+
+This example shows how to get a vector store instance using the default embedding model specified in the `embedder` section of your settings.
+
+```python
+# Get a vector store instance for the "my_documents" collection
+# This will use the "azure-openai" embedder from the settings by default.
+vector_store = vdb_service.get_vector_store(collection_name="my_documents")
+
+# You can now use the vector_store object to add or search for documents
+# vector_store.add_texts(["some text to embed"])
+print(f"Successfully retrieved vector store for collection: {vector_store.collection_name}")
+```
+
+### Advanced Usage: Using an Embedding Model from the Model Load Balancer
+
+In some cases, you may want to use a specific embedding model managed by the central `ModelLoadBalancer`. This example demonstrates how to retrieve that model first and then pass it to `get_vector_store`.
+
+This requires the `ModelLoadBalancer` to have been initialized, as shown in the Initialization section above.
+
+```python
+# 1. Get a specific embedding model from the ModelLoadBalancer
+#    The service will call get_model_balancer() internally to get the initialized instance.
+embedding_model = vdb_service.get_embeddings(
+    from_model_balancer=True,
+    provider="azure-openai-embeddings",
+    model_type="embedding-large"  # Specify the model type configured in the balancer
+)
+
+print(f"Retrieved embedding model from balancer: {embedding_model}")
+
+# 2. Get a vector store instance using the specified embedding model
+vector_store_from_balancer = vdb_service.get_vector_store(
+    collection_name="balancer_collection",
+    embeddings=embedding_model  # Pass the specific embedding model
+)
+
+print(f"Successfully retrieved vector store for collection: {vector_store_from_balancer.collection_name}")
+```
+
+### Getting the Raw Milvus Client
+
+If you need to perform operations not exposed by the LangChain `Zilliz` wrapper, you can get direct access to the underlying `MilvusClient`.
+
+```python
+# Get the raw Milvus client to perform advanced operations
+client = vdb_service.get_vector_client()
+
+# For example, list all collections in the database
+collections = client.list_collections()
+print("Available collections:", collections)
+```
+
+### Adding and Deleting Documents by Source
+
+This example shows a common workflow: adding documents with a specific `source` to a collection, and then using `delete_old_indexes` to remove them based on that source.
+
+**Note:** The `delete_old_indexes` method filters on the `source_url` and `source` metadata fields. Ensure your documents carry the field you intend to filter on.
+
+```python
+from langchain_core.documents import Document
+import time
+
+# 1. Get the vector store instance
+collection_name = "test_collection_for_delete"
+vector_store = vdb_service.get_vector_store(collection_name=collection_name)
+
+# 2. Prepare documents with 'source' in their metadata.
+#    The delete function looks for this specific metadata field.
+docs_to_add = [
+    Document(
+        page_content="This is a test document about CrewPlus AI.",
+        metadata={"source": "http://example.com/crewplus-docs"}
+    ),
+    Document(
+        page_content="This is another test document, about LangChain.",
+        metadata={"source": "http://example.com/langchain-docs"}  # Different source
+    )
+]
+
+# 3. Add the documents to the collection
+ids = vector_store.add_documents(docs_to_add)
+print(f"Added {len(ids)} documents to collection '{collection_name}'.")
+
+# In a real application, you might need a short delay for indexing to complete.
+time.sleep(2)
+
+# 4. Verify the documents were added
+results = vector_store.similarity_search("CrewPlus", k=2)
+print(f"Found {len(results)} related documents before deletion.")
+assert len(results) > 0
+
+# 5. Delete the documents using the same source
+source_to_delete = "http://example.com/crewplus-docs"
+vdb_service.delete_old_indexes(url=source_to_delete, vdb=vector_store)
+print(f"Called delete_old_indexes for source: {source_to_delete}")
+
+# Allow time for the deletion to be processed.
+time.sleep(2)
+
+# 6. Verify the documents from that source were deleted
+#    (the other source remains, so check metadata rather than the raw count)
+results_after_delete = vector_store.similarity_search("CrewPlus", k=2)
+sources_after_delete = [doc.metadata.get("source") for doc in results_after_delete]
+print(f"Found {len(results_after_delete)} related documents after deletion.")
+assert source_to_delete not in sources_after_delete
+
+# 7. Clean up by dropping the collection
+vdb_service.drop_collection(collection_name=collection_name)
+print(f"Dropped collection '{collection_name}'.")
+```
+
+### Adding and Deleting Documents by Source ID
+
+This example shows how to add documents with a `source_id` and then use `delete_old_indexes_by_id` to remove them.
+
+```python
+from langchain_core.documents import Document
+import time
+
+# 1. Get the vector store instance
+collection_name = "test_collection_for_id_delete"
+vector_store_for_id = vdb_service.get_vector_store(collection_name=collection_name)
+
+# 2. Prepare documents with 'source_id' in their metadata.
+docs_with_id = [
+    Document(
+        page_content="Document for agent A.",
+        metadata={"source_id": "agent-a-123"}
+    ),
+    Document(
+        page_content="Another document for agent A.",
+        metadata={"source_id": "agent-a-123"}
+    )
+]
+
+# 3. Add the documents to the collection
+ids = vector_store_for_id.add_documents(docs_with_id)
+print(f"Added {len(ids)} documents to collection '{collection_name}'.")
+
+time.sleep(2)
+
+# 4. Verify the documents were added
+results = vector_store_for_id.similarity_search("agent A", k=2)
+print(f"Found {len(results)} related documents before deletion.")
+assert len(results) == 2
+
+# 5. Delete the documents using the source_id
+id_to_delete = "agent-a-123"
+vdb_service.delete_old_indexes_by_id(source_id=id_to_delete, vdb=vector_store_for_id)
+print(f"Called delete_old_indexes_by_id for source_id: {id_to_delete}")
+
+time.sleep(2)
+
+# 6. Verify the documents were deleted
+results_after_delete = vector_store_for_id.similarity_search("agent A", k=2)
+print(f"Found {len(results_after_delete)} related documents after deletion.")
+assert len(results_after_delete) == 0
+
+# 7. Clean up by dropping the collection
+vdb_service.drop_collection(collection_name=collection_name)
+print(f"Dropped collection '{collection_name}'.")
@@ -0,0 +1,23 @@
+# Welcome to CrewPlus
+
+**CrewPlus** provides the foundational services and core components for building advanced AI applications. It is the heart of the CrewPlus ecosystem, designed for scalability, extensibility, and seamless integration.
+
+## Overview
+
+This repository, `crewplus-base`, contains the core `crewplus` Python package. It includes essential building blocks for interacting with large language models, managing vector databases, and handling application configuration. Whether you are building a simple chatbot or a complex multi-agent system, CrewPlus offers the robust foundation you need.
+
+## The CrewPlus Ecosystem
+
+CrewPlus is designed as a modular and extensible ecosystem of packages. This allows you to adopt only the components you need for your specific use case.
+
+- **`crewplus` (This package):** The core package containing foundational services for chat, model load balancing, and vector stores.
+- **`crewplus-agents`:** An extension for creating and managing autonomous AI agents.
+- **`crewplus-ingestion`:** Provides robust pipelines for knowledge ingestion and data processing.
+- **`crewplus-integrations`:** A collection of third-party integrations to connect CrewPlus with other services and platforms.
+
+## Getting Started
+
+To get started, check out our detailed user guides:
+
+- **[GeminiChatModel Guide](./GeminiChatModel.md)**: A comprehensive guide to using the `GeminiChatModel` for text, image, and video understanding.
+- **[ModelLoadBalancer Guide](./ModelLoadBalancer.md)**: A guide to using the `ModelLoadBalancer` for managing and accessing different language models.
@@ -6,7 +6,7 @@ build-backend = "pdm.backend"
 
 [project]
 name = "crewplus"
-version = "0.2.10"
+version = "0.2.11"
 description = "Base services for CrewPlus AI applications"
 authors = [
     { name = "Tim Liu", email = "tim@opsmateai.com" },
@@ -32,4 +32,8 @@ Documentation = "https://crewplus.readthedocs.io"
 Repository = "https://github.com/your-org/crewplus-base"
 Issues = "https://github.com/your-org/crewplus-base/issues"
 
-[tool]
+[tool.pdm.build]
+includes = [
+    "crewplus/",
+    "docs/",
+]
@@ -1,16 +0,0 @@
-import os
-from crewplus.services.model_load_balancer import ModelLoadBalancer
-
-model_balancer = None
-
-def init_load_balancer():
-    global model_balancer
-    if model_balancer is None:
-        config_path = os.getenv("MODEL_CONFIG_PATH", "config/models_config.json")
-        model_balancer = ModelLoadBalancer(config_path)
-        model_balancer.load_config()  # Load initial configuration synchronously
-
-def get_model_balancer() -> ModelLoadBalancer:
-    if model_balancer is None:
-        raise RuntimeError("ModelLoadBalancer not initialized")
-    return model_balancer