crewplus 0.2.9.tar.gz → 0.2.10.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: crewplus
-Version: 0.2.9
+Version: 0.2.10
 Summary: Base services for CrewPlus AI applications
 Author-Email: Tim Liu <tim@opsmateai.com>
 License: MIT
@@ -45,7 +45,7 @@ CrewPlus is designed as a modular and extensible ecosystem of packages. This all
 
 - **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
 - **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
-- **Vector DB Services:** working with popular vector stores (e.g. Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
+- **Vector DB Services:** Abstractions for working with popular vector stores for retrieval-augmented generation (RAG).
 
 
 ## Documentation
@@ -25,7 +25,7 @@ CrewPlus is designed as a modular and extensible ecosystem of packages. This all
 
 - **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
 - **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
-- **Vector DB Services:** working with popular vector stores (e.g. Milvus, Zilliz Cloud) for retrieval-augmented generation (RAG) and agent memory.
+- **Vector DB Services:** Abstractions for working with popular vector stores for retrieval-augmented generation (RAG).
 
 
 ## Documentation
@@ -0,0 +1,10 @@
+from .services.gemini_chat_model import GeminiChatModel
+from .services.model_load_balancer import ModelLoadBalancer
+from .vectorstores.milvus import SchemaMilvus, VDBService
+
+__all__ = [
+    "GeminiChatModel",
+    "ModelLoadBalancer",
+    "SchemaMilvus",
+    "VDBService"
+]
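
This new top-level `__init__.py` re-exports the package's main entry points, so downstream code can import them directly from the package root. A minimal usage sketch of the imports it enables (assuming the underlying chat-model and Milvus dependencies are installed):

```python
# Hypothetical usage of the new top-level exports in crewplus 0.2.10.
from crewplus import GeminiChatModel, ModelLoadBalancer, SchemaMilvus, VDBService

# Previously these classes were only reachable via their submodules, e.g.:
# from crewplus.services.gemini_chat_model import GeminiChatModel
```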
@@ -0,0 +1,6 @@
+from .gemini_chat_model import GeminiChatModel
+from .init_services import init_load_balancer, get_model_balancer
+from .model_load_balancer import ModelLoadBalancer
+
+
+__all__ = ["GeminiChatModel", "init_load_balancer", "get_model_balancer", "ModelLoadBalancer"]
@@ -3,15 +3,11 @@ from crewplus.services.model_load_balancer import ModelLoadBalancer
 
 model_balancer = None
 
-def init_load_balancer(config_path: str = None):
+def init_load_balancer():
     global model_balancer
     if model_balancer is None:
-        # Use parameter if provided, otherwise check env var, then default
-        final_config_path = config_path or os.getenv(
-            "MODEL_CONFIG_PATH",
-            "config/models_config.json" # Fixed default path
-        )
-        model_balancer = ModelLoadBalancer(final_config_path)
+        config_path = os.getenv("MODEL_CONFIG_PATH", "config/models_config.json")
+        model_balancer = ModelLoadBalancer(config_path)
         model_balancer.load_config() # Load initial configuration synchronously
 
 def get_model_balancer() -> ModelLoadBalancer:
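
With the `config_path` parameter removed, the config location is now controlled solely by the `MODEL_CONFIG_PATH` environment variable, falling back to `config/models_config.json`. A hedged sketch of how a caller that previously passed a path might adapt (the path below is a hypothetical example):

```python
import os

from crewplus.services import init_load_balancer, get_model_balancer

# 0.2.9: init_load_balancer("my_configs/models.json")
# 0.2.10: the path is read from the environment instead, so set it before calling.
os.environ["MODEL_CONFIG_PATH"] = "my_configs/models.json"  # hypothetical path

init_load_balancer()             # falls back to config/models_config.json if the env var is unset
balancer = get_model_balancer()  # returns the module-level singleton
```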
@@ -141,7 +141,6 @@ class ModelLoadBalancer:
             return ChatOpenAI(**kwargs)
         elif provider == 'azure-openai-embeddings':
             return AzureOpenAIEmbeddings(
-                model=model_config['model_name'],
                 azure_deployment=model_config['deployment_name'],
                 openai_api_version=model_config['api_version'],
                 api_key=model_config['api_key'],
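
The only change in this hunk is that the `model` keyword is no longer passed to `AzureOpenAIEmbeddings`; the client is now identified by its Azure deployment name, API version, and key. A sketch of the resulting call shape with placeholder config values (the hunk is truncated, so any remaining keyword arguments such as the endpoint are assumed to follow in the real file):

```python
from langchain_openai import AzureOpenAIEmbeddings

# Placeholder config entry; keys mirror those read in the hunk above.
# A "model_name" key is no longer required for this provider.
model_config = {
    "deployment_name": "text-embedding-3-small",  # placeholder deployment
    "api_version": "2024-02-01",                  # placeholder API version
    "api_key": "...",                             # placeholder secret
}

embeddings = AzureOpenAIEmbeddings(
    azure_deployment=model_config["deployment_name"],
    openai_api_version=model_config["api_version"],
    api_key=model_config["api_key"],
    # the endpoint and any other kwargs continue past the truncated hunk
)
```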
@@ -0,0 +1,4 @@
+from .schema_action import Action
+from .schema_document_updater import SchemaDocumentUpdater
+
+__all__ = ["Action", "SchemaDocumentUpdater"]
@@ -0,0 +1,5 @@
+from .milvus_schema_manager import MilvusSchemaManager, ZillizSchemaManager
+from .schema_milvus import SchemaMilvus
+from .vdb_service import VDBService
+
+__all__ = ["MilvusSchemaManager", "ZillizSchemaManager", "VDBService"]
@@ -5,34 +5,9 @@ import json
 from pymilvus import DataType
 from langchain_milvus import Milvus
 from langchain_core.documents import Document
-from crewplus.utils.schema_document_updater import SchemaDocumentUpdater
-from crewplus.utils.schema_action import Action
+from ...utils import SchemaDocumentUpdater, Action
 from .milvus_schema_manager import MilvusSchemaManager
 
-DEFAULT_SCHEMA = """
-{
-    "node_types": {
-        "Document": {
-            "properties": {
-                "pk": {
-                    "type": "INT64",
-                    "is_primary": true,
-                    "auto_id": true
-                },
-                "vector": {
-                    "type": "FLOAT_VECTOR",
-                    "dim": 1536
-                },
-                "text": {
-                    "type": "VARCHAR",
-                    "max_length": 65535,
-                    "description": "The core text of the memory. This could be a user query, a documented fact, a procedural step, or a log of an event."
-                }
-            }
-        }
-    }
-}
-"""
 
 class SchemaMilvus(Milvus):
     """