crewplus 0.1.0__py3-none-any.whl
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package as it appears in its public registry.
Potentially problematic release.
This version of crewplus has been flagged as potentially problematic; see the registry's advisory page for details.
- crewplus/__init__.py +0 -0
- crewplus/services/__init__.py +1 -0
- crewplus/services/gemini_chat_model.py +365 -0
- crewplus-0.1.0.dist-info/METADATA +117 -0
- crewplus-0.1.0.dist-info/RECORD +8 -0
- crewplus-0.1.0.dist-info/WHEEL +4 -0
- crewplus-0.1.0.dist-info/entry_points.txt +4 -0
- crewplus-0.1.0.dist-info/licenses/LICENSE +21 -0
crewplus/__init__.py
ADDED
(empty file)

crewplus/services/__init__.py
ADDED
@@ -0,0 +1 @@

```python
from .gemini_chat_model import GeminiChatModel
```
crewplus/services/gemini_chat_model.py
ADDED
@@ -0,0 +1,365 @@

````python
import os
import asyncio
from typing import Any, Dict, Iterator, List, Optional, AsyncIterator
from google import genai
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.callbacks import (
    CallbackManagerForLLMRun,
    AsyncCallbackManagerForLLMRun
)
from pydantic import Field, SecretStr
from langchain_core.utils import convert_to_secret_str


class GeminiChatModel(BaseChatModel):
    """Custom chat model using Google's genai client package directly with real streaming support.

    This implementation provides direct access to Google's genai features
    while being compatible with LangChain's BaseChatModel interface.

    Example:
        ```python
        model = GeminiChatModel(
            model_name="gemini-2.0-flash",
            google_api_key="your-api-key",
            temperature=0.7
        )

        # Basic usage
        response = model.invoke("Hello, how are you?")
        print(response.content)

        # Streaming usage
        for chunk in model.stream("Tell me a story"):
            print(chunk.content, end="")

        # Async usage
        async def test_async():
            response = await model.ainvoke("Hello!")
            print(response.content)

            async for chunk in model.astream("Tell me a story"):
                print(chunk.content, end="")
        ```
    """

    # Model configuration
    model_name: str = Field(default="gemini-2.0-flash", description="The Google model name to use")
    google_api_key: Optional[SecretStr] = Field(default=None, description="Google API key")
    temperature: Optional[float] = Field(default=0.7, description="Sampling temperature")
    max_tokens: Optional[int] = Field(default=None, description="Maximum tokens to generate")
    top_p: Optional[float] = Field(default=None, description="Top-p sampling parameter")
    top_k: Optional[int] = Field(default=None, description="Top-k sampling parameter")

    # Internal client
    _client: Optional[genai.Client] = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Get API key from environment if not provided
        if self.google_api_key is None:
            api_key = os.getenv("GOOGLE_API_KEY")
            if api_key:
                self.google_api_key = convert_to_secret_str(api_key)

        # Initialize the Google GenAI client
        if self.google_api_key:
            self._client = genai.Client(
                api_key=self.google_api_key.get_secret_value()
            )
        else:
            raise ValueError("Google API key is required. Set GOOGLE_API_KEY environment variable or pass google_api_key parameter.")

    @property
    def _llm_type(self) -> str:
        """Return identifier for the model type."""
        return "custom_google_genai"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Return a dictionary of identifying parameters for tracing."""
        return {
            "model_name": self.model_name,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
            "top_p": self.top_p,
            "top_k": self.top_k,
        }

    def _convert_messages_to_genai_format(self, messages: List[BaseMessage]) -> str:
        """Convert LangChain messages to Google GenAI format.

        Google GenAI API doesn't support system messages, so we'll convert
        the conversation to a single prompt string with proper formatting.
        """
        prompt_parts = []

        for message in messages:
            if isinstance(message, SystemMessage):
                # Convert system message to instruction format
                prompt_parts.append(f"Instructions: {message.content}")
            elif isinstance(message, HumanMessage):
                prompt_parts.append(f"Human: {message.content}")
            elif isinstance(message, AIMessage):
                prompt_parts.append(f"Assistant: {message.content}")
            else:
                # Default to human format for unknown message types
                prompt_parts.append(f"Human: {str(message.content)}")

        # Add a final prompt for the assistant to respond
        if not prompt_parts or not prompt_parts[-1].startswith("Human:"):
            prompt_parts.append("Human: Please respond to the above.")

        prompt_parts.append("Assistant:")

        return "\n\n".join(prompt_parts)

    def _prepare_generation_config(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """Prepare generation configuration for Google GenAI."""
        generation_config = {}
        if self.temperature is not None:
            generation_config["temperature"] = self.temperature
        if self.max_tokens is not None:
            generation_config["max_output_tokens"] = self.max_tokens
        if self.top_p is not None:
            generation_config["top_p"] = self.top_p
        if self.top_k is not None:
            generation_config["top_k"] = self.top_k
        if stop:
            generation_config["stop_sequences"] = stop
        return generation_config

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a response using Google's genai client."""

        # Convert messages to a single prompt string
        prompt = self._convert_messages_to_genai_format(messages)

        # Prepare generation config
        generation_config = self._prepare_generation_config(stop)

        try:
            # Generate response using Google GenAI
            response = self._client.models.generate_content(
                model=self.model_name,
                contents=prompt,
                config=generation_config if generation_config else None
            )

            # Extract the generated text
            generated_text = response.text if hasattr(response, 'text') else str(response)

            # Create AI message with response metadata
            message = AIMessage(
                content=generated_text,
                response_metadata={
                    "model_name": self.model_name,
                    "finish_reason": getattr(response, 'finish_reason', None),
                }
            )

            # Create and return ChatResult
            generation = ChatGeneration(message=message)
            return ChatResult(generations=[generation])

        except Exception as e:
            raise ValueError(f"Error generating content with Google GenAI: {str(e)}")

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Async generate a response using Google's genai client."""

        # Convert messages to a single prompt string
        prompt = self._convert_messages_to_genai_format(messages)

        # Prepare generation config
        generation_config = self._prepare_generation_config(stop)

        try:
            # Generate response using Google GenAI (run in executor for async)
            loop = asyncio.get_event_loop()
            response = await loop.run_in_executor(
                None,
                lambda: self._client.models.generate_content(
                    model=self.model_name,
                    contents=prompt,
                    config=generation_config if generation_config else None
                )
            )

            # Extract the generated text
            generated_text = response.text if hasattr(response, 'text') else str(response)

            # Create AI message with response metadata
            message = AIMessage(
                content=generated_text,
                response_metadata={
                    "model_name": self.model_name,
                    "finish_reason": getattr(response, 'finish_reason', None),
                }
            )

            # Create and return ChatResult
            generation = ChatGeneration(message=message)
            return ChatResult(generations=[generation])

        except Exception as e:
            raise ValueError(f"Error generating content with Google GenAI: {str(e)}")

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the output using Google's genai client with real streaming."""

        # Convert messages to a single prompt string
        prompt = self._convert_messages_to_genai_format(messages)

        # Prepare generation config
        generation_config = self._prepare_generation_config(stop)

        try:
            # Use Google GenAI streaming
            stream = self._client.models.generate_content_stream(
                model=self.model_name,
                contents=prompt,
                config=generation_config if generation_config else None
            )

            for chunk_response in stream:
                if hasattr(chunk_response, 'text') and chunk_response.text:
                    content = chunk_response.text

                    chunk = ChatGenerationChunk(
                        message=AIMessageChunk(
                            content=content,
                            response_metadata={
                                "model_name": self.model_name,
                                "finish_reason": getattr(chunk_response, 'finish_reason', None),
                            }
                        )
                    )
                    yield chunk

                    # Trigger callback for new token
                    if run_manager:
                        run_manager.on_llm_new_token(content, chunk=chunk)

        except Exception as e:
            # Fallback to non-streaming if streaming fails
            try:
                response = self._client.models.generate_content(
                    model=self.model_name,
                    contents=prompt,
                    config=generation_config if generation_config else None
                )

                generated_text = response.text if hasattr(response, 'text') else str(response)

                # Simulate streaming by yielding words
                words = generated_text.split()
                for i, word in enumerate(words):
                    content = f" {word}" if i > 0 else word

                    chunk = ChatGenerationChunk(
                        message=AIMessageChunk(content=content)
                    )
                    yield chunk

                    if run_manager:
                        run_manager.on_llm_new_token(content, chunk=chunk)

            except Exception as fallback_e:
                raise ValueError(f"Error streaming content with Google GenAI: {str(e)}. Fallback also failed: {str(fallback_e)}")

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Async stream the output using Google's genai client."""

        # Convert messages to a single prompt string
        prompt = self._convert_messages_to_genai_format(messages)

        # Prepare generation config
        generation_config = self._prepare_generation_config(stop)

        try:
            # Use Google GenAI streaming in async context
            loop = asyncio.get_event_loop()

            # Run the streaming in executor
            def create_stream():
                return self._client.models.generate_content_stream(
                    model=self.model_name,
                    contents=prompt,
                    config=generation_config if generation_config else None
                )

            stream = await loop.run_in_executor(None, create_stream)

            for chunk_response in stream:
                if hasattr(chunk_response, 'text') and chunk_response.text:
                    content = chunk_response.text

                    chunk = ChatGenerationChunk(
                        message=AIMessageChunk(
                            content=content,
                            response_metadata={
                                "model_name": self.model_name,
                                "finish_reason": getattr(chunk_response, 'finish_reason', None),
                            }
                        )
                    )
                    yield chunk

                    # Trigger callback for new token
                    if run_manager:
                        await run_manager.on_llm_new_token(content, chunk=chunk)

        except Exception as e:
            # Fallback to async generate and simulate streaming
            try:
                result = await self._agenerate(messages, stop, run_manager, **kwargs)
                generated_text = result.generations[0].message.content

                # Simulate streaming by yielding words
                words = generated_text.split()
                for i, word in enumerate(words):
                    content = f" {word}" if i > 0 else word

                    chunk = ChatGenerationChunk(
                        message=AIMessageChunk(content=content)
                    )
                    yield chunk

                    if run_manager:
                        await run_manager.on_llm_new_token(content, chunk=chunk)

            except Exception as fallback_e:
                raise ValueError(f"Error async streaming content with Google GenAI: {str(e)}. Fallback also failed: {str(fallback_e)}")
````
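One detail worth noting in the module above: `_convert_messages_to_genai_format` flattens the whole conversation into a single prompt string rather than passing structured turns. A minimal sketch of what it produces (illustration only, not part of the package):

```python
from langchain_core.messages import HumanMessage, SystemMessage

# Hypothetical illustration of the flattening implemented above: system
# messages become "Instructions:" lines, human turns become "Human:" lines,
# and a trailing "Assistant:" cue is appended for the model to complete.
messages = [
    SystemMessage(content="You are terse."),
    HumanMessage(content="What is CrewPlus?"),
]
# For this input, the method returns the string:
#
#   Instructions: You are terse.
#
#   Human: What is CrewPlus?
#
#   Assistant:
```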
crewplus-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,117 @@

Metadata-Version: 2.1
Name: crewplus
Version: 0.1.0
Summary: Base services for CrewPlus AI applications
Author-Email: Tim Liu <tim@opsmateai.com>
License: MIT
Project-URL: Homepage, https://github.com/your-org/crewplus-base
Project-URL: Documentation, https://crewplus.readthedocs.io
Project-URL: Repository, https://github.com/your-org/crewplus-base
Project-URL: Issues, https://github.com/your-org/crewplus-base/issues
Requires-Python: <4.0,>=3.11
Requires-Dist: langchain==0.3.25
Requires-Dist: langchain-openai==0.3.24
Requires-Dist: google-genai==1.21.1
Description-Content-Type: text/markdown

# CrewPlus

[PyPI version](https://badge.fury.io/py/crewplus)
[License: MIT](https://opensource.org/licenses/MIT)
[Python versions](https://pypi.org/project/crewplus)
[Build status](https://travis-ci.com/your-org/crewplus-base)

**CrewPlus** provides the foundational services and core components for building advanced AI applications. It is the heart of the CrewPlus ecosystem, designed for scalability, extensibility, and seamless integration.

## Overview

This repository, `crewplus-base`, contains the core `crewplus` Python package. It includes essential building blocks for interacting with large language models, managing vector databases, and handling application configuration. Whether you are building a simple chatbot or a complex multi-agent system, CrewPlus offers the robust foundation you need.

## The CrewPlus Ecosystem

CrewPlus is designed as a modular and extensible ecosystem of packages, so you can adopt only the components you need for your specific use case.

- **`crewplus` (this package):** The core package containing foundational services for chat, model load balancing, and vector stores.
- **`crewplus-agents`:** An extension for creating and managing autonomous AI agents.
- **`crewplus-ingestion`:** Robust pipelines for knowledge ingestion and data processing.
- **`crewplus-integrations`:** A collection of third-party integrations that connect CrewPlus with other services and platforms.
- **`crewplus-enterprise`:** Enterprise-grade features for security, scalability, and support.

## Features

- **Chat Services:** A unified interface for interacting with various chat models (e.g., `GeminiChatModel`).
- **Model Load Balancer:** Intelligently distribute requests across multiple LLM endpoints.
- **Vector DB Services:** Abstractions for working with popular vector stores for retrieval-augmented generation (RAG).
- **Centralized Configuration:** Manage application settings and secrets from a single source of truth (`core/config.py`).

## Installation

To install the core `crewplus` package, run:

```bash
pip install crewplus
```

## Getting Started

Here is a simple example of how to use the `GeminiChatModel` to start a conversation with an AI model.

```python
# main.py
from crewplus.services import GeminiChatModel

# Initialize the model (API keys are typically handled by the configuration module)
llm = GeminiChatModel(google_api_key="your-google-api-key")

# Start a conversation (GeminiChatModel exposes LangChain's standard invoke API)
response = llm.invoke("Hello, what is CrewPlus?")

print(response.content)
```
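`GeminiChatModel` also implements LangChain's standard streaming interface (see `_stream` and `_astream` in `gemini_chat_model.py`), so the same object can emit the reply incrementally. A minimal sketch, assuming the `llm` instance from the example above:

```python
# Print chunks as the API produces them instead of waiting for the full reply
for chunk in llm.stream("Tell me more about the CrewPlus ecosystem"):
    print(chunk.content, end="", flush=True)
```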
## Project Structure

The `crewplus-base` repository is organized to separate core logic, tests, and documentation.

```
crewplus-base/                      # GitHub repository name
├── pyproject.toml
├── README.md
├── LICENSE
├── CHANGELOG.md
├── crewplus/                       # directory matching the PyPI package name
│   ├── __init__.py
│   ├── services/
│   │   ├── __init__.py
│   │   ├── gemini_chat_model.py
│   │   ├── model_load_balancer.py
│   │   ├── vdb_service.py
│   │   └── ...
│   ├── vectorstores/
│   │   └── ...
│   └── core/
│       ├── __init__.py
│       ├── config.py
│       └── ...
├── tests/
│   └── ...
└── notebooks/
    └── ...
```

## Deploy to PyPI

```bash
# install build and deployment tools
pip install build twine

# build the package
python -m build

# deploy to TestPyPI (test first)
python -m twine upload --repository testpypi dist/*

# install from TestPyPI
pip install -i https://test.pypi.org/simple/ crewplus

# deploy to the official PyPI
python -m twine upload dist/*
```
crewplus-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@

```
crewplus-0.1.0.dist-info/METADATA,sha256=CWy28dbYLsmh73S3NXZ0Ot1R81I5-ARLdjFR3jnclKk,4507
crewplus-0.1.0.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
crewplus-0.1.0.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
crewplus-0.1.0.dist-info/licenses/LICENSE,sha256=2_NHSHRTKB_cTcT_GXgcenOCtIZku8j343mOgAguTfc,1087
crewplus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
crewplus/services/__init__.py,sha256=MmH2v3N0ZMsuqFNAupkXENjUqvgf5ehQ99H6EzPqLZU,48
crewplus/services/gemini_chat_model.py,sha256=idibpvbF9asBdJByR2XCHcdd5XHwBnVs_U0udOFXhN4,15081
crewplus-0.1.0.dist-info/RECORD,,
```
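Each RECORD row lists a file path, its digest in `sha256=<urlsafe base64, unpadded>` form per the wheel spec, and its size in bytes (the RECORD file itself leaves its own digest and size fields empty). A minimal sketch for recomputing such a digest from an extracted wheel, assuming the file path exists locally:

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    """Recompute a wheel RECORD digest: SHA-256, urlsafe base64, '=' padding stripped."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# e.g. record_digest("crewplus/services/gemini_chat_model.py") should match the
# value recorded above: sha256=idibpvbF9asBdJByR2XCHcdd5XHwBnVs_U0udOFXhN4
```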
crewplus-0.1.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@

```
MIT License

Copyright (c) Opsmate AI, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```