local-coze 0.0.1 (local_coze-0.0.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. local_coze/__init__.py +110 -0
  2. local_coze/cli/__init__.py +3 -0
  3. local_coze/cli/chat.py +126 -0
  4. local_coze/cli/cli.py +34 -0
  5. local_coze/cli/constants.py +7 -0
  6. local_coze/cli/db.py +81 -0
  7. local_coze/cli/embedding.py +193 -0
  8. local_coze/cli/image.py +162 -0
  9. local_coze/cli/knowledge.py +195 -0
  10. local_coze/cli/search.py +198 -0
  11. local_coze/cli/utils.py +41 -0
  12. local_coze/cli/video.py +191 -0
  13. local_coze/cli/video_edit.py +888 -0
  14. local_coze/cli/voice.py +351 -0
  15. local_coze/core/__init__.py +25 -0
  16. local_coze/core/client.py +253 -0
  17. local_coze/core/config.py +58 -0
  18. local_coze/core/exceptions.py +67 -0
  19. local_coze/database/__init__.py +29 -0
  20. local_coze/database/client.py +170 -0
  21. local_coze/database/migration.py +342 -0
  22. local_coze/embedding/__init__.py +31 -0
  23. local_coze/embedding/client.py +350 -0
  24. local_coze/embedding/models.py +130 -0
  25. local_coze/image/__init__.py +19 -0
  26. local_coze/image/client.py +110 -0
  27. local_coze/image/models.py +163 -0
  28. local_coze/knowledge/__init__.py +19 -0
  29. local_coze/knowledge/client.py +148 -0
  30. local_coze/knowledge/models.py +45 -0
  31. local_coze/llm/__init__.py +25 -0
  32. local_coze/llm/client.py +317 -0
  33. local_coze/llm/models.py +48 -0
  34. local_coze/memory/__init__.py +14 -0
  35. local_coze/memory/client.py +176 -0
  36. local_coze/s3/__init__.py +12 -0
  37. local_coze/s3/client.py +580 -0
  38. local_coze/s3/models.py +18 -0
  39. local_coze/search/__init__.py +19 -0
  40. local_coze/search/client.py +183 -0
  41. local_coze/search/models.py +57 -0
  42. local_coze/video/__init__.py +17 -0
  43. local_coze/video/client.py +347 -0
  44. local_coze/video/models.py +39 -0
  45. local_coze/video_edit/__init__.py +23 -0
  46. local_coze/video_edit/examples.py +340 -0
  47. local_coze/video_edit/frame_extractor.py +176 -0
  48. local_coze/video_edit/models.py +362 -0
  49. local_coze/video_edit/video_edit.py +631 -0
  50. local_coze/voice/__init__.py +17 -0
  51. local_coze/voice/asr.py +82 -0
  52. local_coze/voice/models.py +86 -0
  53. local_coze/voice/tts.py +94 -0
  54. local_coze-0.0.1.dist-info/METADATA +636 -0
  55. local_coze-0.0.1.dist-info/RECORD +58 -0
  56. local_coze-0.0.1.dist-info/WHEEL +4 -0
  57. local_coze-0.0.1.dist-info/entry_points.txt +3 -0
  58. local_coze-0.0.1.dist-info/licenses/LICENSE +21 -0
local_coze/image/models.py
@@ -0,0 +1,163 @@
+ import re
+ from typing import Optional, Union, List, Literal
+ from pydantic import BaseModel, Field, field_validator
+
+ from ..core.exceptions import ValidationError
+
+
+ class ImageConfig:
+     DEFAULT_MODEL = "doubao-seedream-4-5-251128"
+     DEFAULT_SIZE = "2K"
+     DEFAULT_CUSTOM_SIZE = "2048x2048"
+     DEFAULT_WATERMARK = True
+     DEFAULT_RESPONSE_FORMAT = "url"
+     DEFAULT_OPTIMIZE_PROMPT_MODE = "standard"
+     DEFAULT_SEQUENTIAL_IMAGE_GENERATION = "disabled"
+     DEFAULT_SEQUENTIAL_IMAGE_GENERATION_MAX_IMAGES = 15
+
+     MIN_TOTAL_PIXELS = 2560 * 1440
+     MAX_TOTAL_PIXELS = 4096 * 4096
+     MIN_ASPECT_RATIO = 1 / 16
+     MAX_ASPECT_RATIO = 16
+
+     PRESET_SIZES = ["2K", "4K"]
+
+
+ class ImageSize:
+     @staticmethod
+     def validate(size: str) -> str:
+         if size in ImageConfig.PRESET_SIZES:
+             return size
+
+         match = re.match(r'^(\d+)x(\d+)$', size)
+         if not match:
+             raise ValidationError(
+                 f"Size must be one of {ImageConfig.PRESET_SIZES} or in 'WIDTHxHEIGHT' format (e.g. 2048x2048)",
+                 field="size",
+                 value=size
+             )
+
+         width, height = int(match.group(1)), int(match.group(2))
+
+         if width <= 0 or height <= 0:
+             raise ValidationError(
+                 "Width and height must be positive integers",
+                 field="size",
+                 value=size
+             )
+
+         total_pixels = width * height
+         if not (ImageConfig.MIN_TOTAL_PIXELS <= total_pixels <= ImageConfig.MAX_TOTAL_PIXELS):
+             raise ValidationError(
+                 f"Total pixel count must be within [{ImageConfig.MIN_TOTAL_PIXELS:,}, {ImageConfig.MAX_TOTAL_PIXELS:,}], "
+                 f"got: {total_pixels:,} ({width}x{height})",
+                 field="size",
+                 value=size
+             )
+
+         aspect_ratio = width / height
+         if not (ImageConfig.MIN_ASPECT_RATIO <= aspect_ratio <= ImageConfig.MAX_ASPECT_RATIO):
+             raise ValidationError(
+                 f"Aspect ratio must be within [1:16, 16:1], got: {width}:{height} ({aspect_ratio:.2f})",
+                 field="size",
+                 value=size
+             )
+
+         return size
+
+     @staticmethod
+     def validate_or_default(size: str) -> str:
+         try:
+             return ImageSize.validate(size)
+         except ValidationError:
+             return ImageConfig.DEFAULT_SIZE
+
+
+ class ImageGenerationRequest(BaseModel):
+     prompt: str = Field(..., description="Prompt used to generate the image")
+     size: str = Field(default=ImageConfig.DEFAULT_SIZE, description="Size of the generated image")
+     watermark: bool = Field(default=ImageConfig.DEFAULT_WATERMARK, description="Whether to add a watermark")
+     image: Optional[Union[str, List[str]]] = Field(default=None, description="Reference image(s)")
+     response_format: Literal["url", "b64_json"] = Field(
+         default=ImageConfig.DEFAULT_RESPONSE_FORMAT,
+         description="Response format"
+     )
+     optimize_prompt_mode: str = Field(
+         default=ImageConfig.DEFAULT_OPTIMIZE_PROMPT_MODE,
+         description="Prompt optimization mode"
+     )
+     sequential_image_generation: Literal["auto", "disabled"] = Field(
+         default=ImageConfig.DEFAULT_SEQUENTIAL_IMAGE_GENERATION,
+         description="Sequential (multi-image) generation"
+     )
+     sequential_image_generation_max_images: int = Field(
+         default=ImageConfig.DEFAULT_SEQUENTIAL_IMAGE_GENERATION_MAX_IMAGES,
+         ge=1,
+         le=15,
+         description="Maximum number of images"
+     )
+
+     @field_validator('size')
+     @classmethod
+     def validate_size(cls, v: str) -> str:
+         return ImageSize.validate_or_default(v)
+
+     def to_api_request(self, model: str) -> dict:
+         return {
+             "model": model,
+             "prompt": self.prompt,
+             "size": self.size,
+             "watermark": self.watermark,
+             "image": self.image,
+             "response_format": self.response_format,
+             "optimize_prompt_options": {
+                 "mode": self.optimize_prompt_mode,
+             },
+             "sequential_image_generation": self.sequential_image_generation,
+             "sequential_image_generation_options": {
+                 "max_images": self.sequential_image_generation_max_images,
+             },
+         }
+
+
+ class ImageData(BaseModel):
+     url: Optional[str] = None
+     b64_json: Optional[str] = None
+     size: Optional[str] = None
+     error: Optional[dict] = None
+
+
+ class UsageInfo(BaseModel):
+     generated_images: int = 0
+     output_tokens: Optional[int] = None
+     total_tokens: Optional[int] = None
+
+
+ class ImageGenerationResponse(BaseModel):
+     model: str
+     created: int
+     data: List[ImageData]
+     usage: Optional[UsageInfo] = None
+     error: Optional[dict] = None
+
+     @property
+     def success(self) -> bool:
+         return self.error is None and all(item.error is None for item in self.data)
+
+     @property
+     def image_urls(self) -> List[str]:
+         return [item.url for item in self.data if item.url]
+
+     @property
+     def image_b64_list(self) -> List[str]:
+         return [item.b64_json for item in self.data if item.b64_json]
+
+     @property
+     def error_messages(self) -> List[str]:
+         messages = []
+         if self.error:
+             messages.append(f"API error: {self.error.get('message', 'Unknown error')}")
+         for item in self.data:
+             if item.error:
+                 messages.append(f"Image generation error: {item.error.get('message', 'Unknown error')}")
+         return messages
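
For orientation, here is a minimal sketch of how these models might be exercised on their own (the model name is simply ImageConfig.DEFAULT_MODEL from this file; the ImageClient that normally drives them lives in local_coze/image/client.py and is not shown in this hunk):

    from local_coze.image.models import ImageConfig, ImageGenerationRequest

    # The 'size' validator falls back to DEFAULT_SIZE ("2K") for values that
    # fail ImageSize.validate, e.g. a custom size below MIN_TOTAL_PIXELS.
    req = ImageGenerationRequest(
        prompt="a watercolor lighthouse at dusk",
        size="100x100",
        watermark=False,
    )
    assert req.size == ImageConfig.DEFAULT_SIZE

    # to_api_request() assembles the provider payload, nesting the prompt
    # optimization and sequential-generation options.
    payload = req.to_api_request(model=ImageConfig.DEFAULT_MODEL)
    print(payload["size"], payload["optimize_prompt_options"]["mode"])  # prints: 2K standard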
local_coze/knowledge/__init__.py
@@ -0,0 +1,19 @@
+ from .client import KnowledgeClient
+ from .models import (
+     ChunkConfig,
+     DataSourceType,
+     KnowledgeChunk,
+     KnowledgeDocument,
+     KnowledgeInsertResponse,
+     KnowledgeSearchResponse,
+ )
+
+ __all__ = [
+     "KnowledgeClient",
+     "ChunkConfig",
+     "DataSourceType",
+     "KnowledgeSearchResponse",
+     "KnowledgeInsertResponse",
+     "KnowledgeChunk",
+     "KnowledgeDocument",
+ ]
local_coze/knowledge/client.py
@@ -0,0 +1,148 @@
+ from typing import Dict, List, Optional, Union
+
+ from coze_coding_utils.runtime_ctx.context import Context
+
+ from ..core.client import BaseClient
+ from ..core.config import Config
+ from .models import (
+     ChunkConfig,
+     KnowledgeChunk,
+     KnowledgeDocument,
+     KnowledgeInsertResponse,
+     KnowledgeSearchResponse,
+ )
+
+
+ class KnowledgeClient(BaseClient):
+     def __init__(
+         self,
+         config: Optional[Config] = None,
+         ctx: Optional[Context] = None,
+         custom_headers: Optional[Dict[str, str]] = None,
+         verbose: bool = True,
+     ):
+         super().__init__(config, ctx, custom_headers, verbose)
+         # Use COZE_INTEGRATION_BASE_URL as endpoint
+         self.base_url = self.config.base_url
+
+     def search(
+         self,
+         query: str,
+         table_names: Optional[List[str]] = None,
+         top_k: int = 5,
+         min_score: Optional[float] = 0.0,
+         extra_headers: Optional[Dict[str, str]] = None,
+     ) -> KnowledgeSearchResponse:
+         """
+         Search for knowledge chunks in specified tables.
+
+         Args:
+             query: The search query string.
+             table_names: List of table names to search in (dataset).
+             top_k: Number of results to return. Default is 5.
+             min_score: Minimum similarity score. Default is 0.0.
+             extra_headers: Extra headers to send with the request.
+
+         Returns:
+             KnowledgeSearchResponse: The search results.
+         """
+         payload = {
+             "query": query,
+             "top_k": top_k,
+             "min_score": min_score,
+         }
+         if table_names:
+             payload["dataset"] = table_names
+
+         url = f"{self.base_url}/v1/knowledge_base/recall"
+
+         headers = extra_headers or {}
+         if self.config.api_key:
+             headers["x-coze-token"] = f"Bearer {self.config.api_key}"
+
+         response = self._request(
+             method="POST",
+             url=url,
+             json=payload,
+             headers=headers,
+         )
+
+         # Response structure: { "data": [ ... ], "BaseResp": ... }
+         # RecallDataInfo: { "slice": "...", "score": 0.0, "chunk_id": "...", "doc_id": "..." }
+
+         data_list = response.get("data", [])
+
+         chunks = []
+         for item in data_list:
+             # Map RecallDataInfo to KnowledgeChunk
+             chunks.append(KnowledgeChunk(
+                 content=item.get("slice", ""),
+                 score=item.get("score", 0.0),
+                 chunk_id=item.get("chunk_id"),
+                 doc_id=item.get("doc_id")
+             ))
+
+         return KnowledgeSearchResponse(
+             chunks=chunks,
+             code=response.get("code", 0),
+             msg=response.get("msg", "")
+         )
+
+     def add_documents(
+         self,
+         documents: List[Union[KnowledgeDocument, Dict]],
+         table_name: str,
+         chunk_config: Optional[ChunkConfig] = None,
+         extra_headers: Optional[Dict[str, str]] = None,
+     ) -> KnowledgeInsertResponse:
+         """
+         Add documents to a specified table in the knowledge base (BatchImportData).
+
+         Args:
+             documents: List of documents to add. Can be KnowledgeDocument objects or dictionaries.
+             table_name: The name of the table to add documents to (dataset).
+             chunk_config: Optional chunking configuration.
+             extra_headers: Extra headers to send with the request.
+
+         Returns:
+             KnowledgeInsertResponse: The response containing inserted document IDs and status.
+         """
+         docs_payload = []
+         for doc in documents:
+             if isinstance(doc, KnowledgeDocument):
+                 docs_payload.append(doc.to_api_format())
+             else:
+                 # Assuming dictionary is already in correct format or close to it
+                 docs_payload.append(doc)
+
+         payload = {
+             "dataset": table_name,
+             "data": docs_payload,
+         }
+
+         if chunk_config:
+             payload["chunk_config"] = chunk_config.model_dump(exclude_none=True)
+
+         url = f"{self.base_url}/v1/knowledge_base/batch_import"
+
+         headers = extra_headers or {}
+         if self.config.api_key:
+             headers["x-coze-token"] = f"Bearer {self.config.api_key}"
+
+         response = self._request(
+             method="POST",
+             url=url,
+             json=payload,
+             headers=headers,
+         )
+
+         code = response.get("code", 0)
+         msg = response.get("msg", "")
+         if code == 0:
+             msg = "Success! Documents are being imported asynchronously; please wait. You can check the 'knowledge' schema in the database to verify the import status."
+
+         return KnowledgeInsertResponse(
+             doc_ids=response.get("doc_ids"),
+             code=code,
+             msg=msg
+         )
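
A rough usage sketch of the client above, assuming BaseClient/Config resolve the base URL and API key from the environment (their definitions sit in local_coze/core and are not part of this hunk); the table name "faq" and the document URL are placeholders:

    from local_coze.knowledge import (
        ChunkConfig,
        DataSourceType,
        KnowledgeClient,
        KnowledgeDocument,
    )

    client = KnowledgeClient()  # assumes Config supplies base_url and api_key

    # Batch import is asynchronous; code == 0 only means the import was accepted.
    resp = client.add_documents(
        documents=[KnowledgeDocument(source=DataSourceType.URL, url="https://example.com/guide.pdf")],
        table_name="faq",
        chunk_config=ChunkConfig(separator="\n\n", max_tokens=500),
    )
    print(resp.code, resp.msg)

    # Recall against the same table once the documents have been indexed.
    hits = client.search(query="how do I reset my password?", table_names=["faq"], top_k=3)
    for chunk in hits.chunks:
        print(f"{chunk.score:.3f}  {chunk.content[:80]}")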
local_coze/knowledge/models.py
@@ -0,0 +1,45 @@
+ from enum import IntEnum
+ from typing import List, Optional, Dict, Any, Union
+ from pydantic import BaseModel, Field
+
+
+ class DataSourceType(IntEnum):
+     TEXT = 0
+     URL = 1
+     URI = 2
+
+
+ class ChunkConfig(BaseModel):
+     separator: str = Field(..., description="Chunk separator")
+     max_tokens: int = Field(..., description="Max tokens per chunk")
+     remove_extra_spaces: bool = Field(False, description="Normalize extra spaces")
+     remove_urls_emails: bool = Field(False, description="Strip URLs and emails")
+
+
+ class KnowledgeDocument(BaseModel):
+     source: DataSourceType = Field(..., description="Data source type")
+     raw_data: Optional[str] = Field(None, description="Plain text content (for RAW_TEXT)")
+     uri: Optional[str] = Field(None, description="Object storage URI")
+     url: Optional[str] = Field(None, description="Document download link")
+
+     def to_api_format(self) -> Dict[str, Any]:
+         return self.model_dump(exclude_none=True)
+
+
+ class KnowledgeChunk(BaseModel):
+     content: str = Field(..., description="The content of the chunk (slice)")
+     score: float = Field(..., description="Similarity score")
+     chunk_id: Optional[str] = Field(None, description="Unique identifier of the chunk")
+     doc_id: Optional[str] = Field(None, description="Unique identifier of the document")
+
+
+ class KnowledgeSearchResponse(BaseModel):
+     chunks: List[KnowledgeChunk] = Field(default_factory=list, description="List of matching knowledge chunks")
+     code: int = Field(..., description="Status code")
+     msg: str = Field(..., description="Status message")
+
+
+ class KnowledgeInsertResponse(BaseModel):
+     doc_ids: Optional[List[str]] = Field(None, description="List of inserted document IDs")
+     code: int = Field(..., description="Status code")
+     msg: str = Field(..., description="Status message")
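
For completeness, a small illustration of the payload shape these models produce (values are placeholders). to_api_format() relies on exclude_none, so only populated fields reach the batch_import request, and DataSourceType is an IntEnum, so it serializes as its integer value on the wire:

    from local_coze.knowledge.models import DataSourceType, KnowledgeDocument

    doc = KnowledgeDocument(
        source=DataSourceType.TEXT,
        raw_data="Local Coze ships a knowledge client.",
    )
    # Only 'source' and 'raw_data' survive; the unset 'uri' and 'url' are dropped.
    print(doc.to_api_format())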
local_coze/llm/__init__.py
@@ -0,0 +1,25 @@
+ from .client import LLMClient
+ from .models import (
+     LLMConfig,
+     ThinkingConfig,
+     CachingConfig,
+     TextContent,
+     ImageURLContent,
+     VideoURLContent,
+     ImageURLDetail,
+     VideoURLDetail,
+     MessageContent
+ )
+
+ __all__ = [
+     "LLMClient",
+     "LLMConfig",
+     "ThinkingConfig",
+     "CachingConfig",
+     "TextContent",
+     "ImageURLContent",
+     "VideoURLContent",
+     "ImageURLDetail",
+     "VideoURLDetail",
+     "MessageContent"
+ ]