local_coze-0.0.1-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (58)
  1. local_coze/__init__.py +110 -0
  2. local_coze/cli/__init__.py +3 -0
  3. local_coze/cli/chat.py +126 -0
  4. local_coze/cli/cli.py +34 -0
  5. local_coze/cli/constants.py +7 -0
  6. local_coze/cli/db.py +81 -0
  7. local_coze/cli/embedding.py +193 -0
  8. local_coze/cli/image.py +162 -0
  9. local_coze/cli/knowledge.py +195 -0
  10. local_coze/cli/search.py +198 -0
  11. local_coze/cli/utils.py +41 -0
  12. local_coze/cli/video.py +191 -0
  13. local_coze/cli/video_edit.py +888 -0
  14. local_coze/cli/voice.py +351 -0
  15. local_coze/core/__init__.py +25 -0
  16. local_coze/core/client.py +253 -0
  17. local_coze/core/config.py +58 -0
  18. local_coze/core/exceptions.py +67 -0
  19. local_coze/database/__init__.py +29 -0
  20. local_coze/database/client.py +170 -0
  21. local_coze/database/migration.py +342 -0
  22. local_coze/embedding/__init__.py +31 -0
  23. local_coze/embedding/client.py +350 -0
  24. local_coze/embedding/models.py +130 -0
  25. local_coze/image/__init__.py +19 -0
  26. local_coze/image/client.py +110 -0
  27. local_coze/image/models.py +163 -0
  28. local_coze/knowledge/__init__.py +19 -0
  29. local_coze/knowledge/client.py +148 -0
  30. local_coze/knowledge/models.py +45 -0
  31. local_coze/llm/__init__.py +25 -0
  32. local_coze/llm/client.py +317 -0
  33. local_coze/llm/models.py +48 -0
  34. local_coze/memory/__init__.py +14 -0
  35. local_coze/memory/client.py +176 -0
  36. local_coze/s3/__init__.py +12 -0
  37. local_coze/s3/client.py +580 -0
  38. local_coze/s3/models.py +18 -0
  39. local_coze/search/__init__.py +19 -0
  40. local_coze/search/client.py +183 -0
  41. local_coze/search/models.py +57 -0
  42. local_coze/video/__init__.py +17 -0
  43. local_coze/video/client.py +347 -0
  44. local_coze/video/models.py +39 -0
  45. local_coze/video_edit/__init__.py +23 -0
  46. local_coze/video_edit/examples.py +340 -0
  47. local_coze/video_edit/frame_extractor.py +176 -0
  48. local_coze/video_edit/models.py +362 -0
  49. local_coze/video_edit/video_edit.py +631 -0
  50. local_coze/voice/__init__.py +17 -0
  51. local_coze/voice/asr.py +82 -0
  52. local_coze/voice/models.py +86 -0
  53. local_coze/voice/tts.py +94 -0
  54. local_coze-0.0.1.dist-info/METADATA +636 -0
  55. local_coze-0.0.1.dist-info/RECORD +58 -0
  56. local_coze-0.0.1.dist-info/WHEEL +4 -0
  57. local_coze-0.0.1.dist-info/entry_points.txt +3 -0
  58. local_coze-0.0.1.dist-info/licenses/LICENSE +21 -0
local_coze/__init__.py ADDED
@@ -0,0 +1,110 @@
+ from .core import (
+     APIError,
+     BaseClient,
+     Config,
+     ConfigurationError,
+     CozeSDKError,
+     NetworkError,
+     ValidationError,
+ )
+ from .image import (
+     ImageConfig,
+     ImageData,
+     ImageGenerationClient,
+     ImageGenerationRequest,
+     ImageGenerationResponse,
+     ImageSize,
+     UsageInfo,
+ )
+ from .llm import LLMClient, LLMConfig
+ from .knowledge import (
+     ChunkConfig,
+     KnowledgeChunk,
+     KnowledgeClient,
+     KnowledgeDocument,
+     KnowledgeInsertResponse,
+     KnowledgeSearchResponse,
+ )
+ from .search import ImageItem, SearchClient, WebItem
+ from .video import VideoGenerationClient, VideoGenerationTask
+ from .voice import ASRClient, ASRRequest, ASRResponse, TTSClient, TTSConfig, TTSRequest
+
+ from .database import Base, generate_models, get_session, upgrade
+
+ from .memory import get_memory_saver
+
+ from .s3 import S3SyncStorage
+
+ from .embedding import (
+     EmbeddingClient,
+     EmbeddingConfig,
+     EmbeddingInputItem,
+     EmbeddingInputImageURL,
+     EmbeddingInputVideoURL,
+     EmbeddingRequest,
+     EmbeddingResponse,
+     EmbeddingData,
+     EmbeddingUsage,
+     MultiEmbeddingConfig,
+     SparseEmbeddingConfig,
+     SparseEmbeddingItem,
+     PromptTokensDetails,
+ )
+
+ __version__ = "0.1.0"
+
+ __all__ = [
+     "Config",
+     "BaseClient",
+     "CozeSDKError",
+     "ConfigurationError",
+     "APIError",
+     "NetworkError",
+     "ValidationError",
+     "ImageGenerationClient",
+     "ImageConfig",
+     "ImageSize",
+     "ImageGenerationRequest",
+     "ImageGenerationResponse",
+     "ImageData",
+     "UsageInfo",
+     "TTSClient",
+     "ASRClient",
+     "TTSConfig",
+     "TTSRequest",
+     "ASRRequest",
+     "ASRResponse",
+     "LLMClient",
+     "LLMConfig",
+     "KnowledgeClient",
+     "ChunkConfig",
+     "DataSourceType",
+     "KnowledgeSearchResponse",
+     "KnowledgeChunk",
+     "KnowledgeDocument",
+     "KnowledgeInsertResponse",
+     "SearchClient",
+     "WebItem",
+     "ImageItem",
+     "VideoGenerationClient",
+     "VideoGenerationTask",
+     "Base",
+     "get_session",
+     "generate_models",
+     "upgrade",
+     "get_memory_saver",
+     "S3SyncStorage",
+     "EmbeddingClient",
+     "EmbeddingConfig",
+     "EmbeddingInputItem",
+     "EmbeddingInputImageURL",
+     "EmbeddingInputVideoURL",
+     "EmbeddingRequest",
+     "EmbeddingResponse",
+     "EmbeddingData",
+     "EmbeddingUsage",
+     "MultiEmbeddingConfig",
+     "SparseEmbeddingConfig",
+     "SparseEmbeddingItem",
+     "PromptTokensDetails",
+ ]
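Note: the top-level package re-exports the clients and models above, so downstream code can import everything from local_coze directly. A minimal sketch, assuming Config reads credentials from the environment (as the CLI commands further down suggest); the keyword arguments mirror what cli/chat.py passes:

    from langchain_core.messages import HumanMessage
    from local_coze import Config, LLMClient

    # Sketch only: construct a client and send one message, as cli/chat.py does.
    config = Config()
    client = LLMClient(config, custom_headers={}, verbose=False)
    response = client.invoke(messages=[HumanMessage(content="hello")], thinking="disabled")
    print(response.content)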
local_coze/cli/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """Coze Coding CLI - Command-line interface for Coze Coding integrations."""
+
+ __version__ = "0.3.0"
local_coze/cli/chat.py ADDED
@@ -0,0 +1,126 @@
+ import json
+ import os
+ from typing import Optional
+
+ import click
+ from langchain_core.messages import HumanMessage, SystemMessage
+ from rich.console import Console
+
+ from ..core.config import Config
+ from ..llm import LLMClient
+
+ console = Console()
+
+
+ @click.command()
+ @click.option("--prompt", "-p", required=True, help="User message content")
+ @click.option("--system", "-s", help="System prompt for custom behavior")
+ @click.option(
+     "--thinking", "-t", is_flag=True, help="Enable chain-of-thought reasoning"
+ )
+ @click.option(
+     "--output", "-o", type=click.Path(), help="Output file path (JSON format)"
+ )
+ @click.option("--stream", is_flag=True, help="Stream the response in real-time")
+ @click.option(
+     "--header",
+     "-H",
+     multiple=True,
+     help="Custom HTTP request header (format: 'Key: Value' or 'Key=Value'; may be used multiple times)",
+ )
+ @click.option("--verbose", "-v", is_flag=True, help="Show detailed HTTP request logs")
+ def chat(
+     prompt: str,
+     system: Optional[str],
+     thinking: bool,
+     output: Optional[str],
+     stream: bool,
+     header: tuple,
+     verbose: bool,
+ ):
+     """Chat with AI using natural language."""
+     try:
+         from .utils import parse_headers
+
+         config = Config()
+         custom_headers = parse_headers(header)
+         client = LLMClient(config, custom_headers=custom_headers, verbose=verbose)
+
+         messages = []
+         if system:
+             messages.append(SystemMessage(content=system))
+         messages.append(HumanMessage(content=prompt))
+
+         thinking_mode = "enabled" if thinking else "disabled"
+
+         console.print(f"[bold cyan]Chat with AI...[/bold cyan]")
+         console.print(f"Prompt: [yellow]{prompt}[/yellow]")
+         if system:
+             console.print(f"System: [blue]{system}[/blue]")
+         if thinking:
+             console.print(f"Thinking: [green]enabled[/green]")
+         console.print()
+
+         if stream:
+             console.print("[bold green]Response:[/bold green]")
+             full_content = ""
+             response_metadata = {}
+
+             for chunk in client.stream(messages=messages, thinking=thinking_mode):
+                 if chunk.content:
+                     console.print(chunk.content, end="")
+                     full_content += chunk.content
+                 if chunk.response_metadata:
+                     response_metadata.update(chunk.response_metadata)
+
+             console.print("\n")
+
+             if output:
+                 result = {
+                     "prompt": prompt,
+                     "system": system,
+                     "thinking": thinking,
+                     "response": full_content,
+                     "metadata": response_metadata,
+                 }
+
+                 output_dir = os.path.dirname(os.path.abspath(output))
+                 if output_dir:
+                     os.makedirs(output_dir, exist_ok=True)
+
+                 with open(output, "w", encoding="utf-8") as f:
+                     json.dump(result, f, ensure_ascii=False, indent=2)
+
+                 console.print(
+                     f"[green]✓[/green] Response saved to: [bold]{output}[/bold]"
+                 )
+         else:
+             response = client.invoke(messages=messages, thinking=thinking_mode)
+
+             console.print("[bold green]Response:[/bold green]")
+             console.print(response.content)
+             console.print()
+
+             if output:
+                 result = {
+                     "prompt": prompt,
+                     "system": system,
+                     "thinking": thinking,
+                     "response": response.content,
+                     "metadata": response.response_metadata,
+                 }
+
+                 output_dir = os.path.dirname(os.path.abspath(output))
+                 if output_dir:
+                     os.makedirs(output_dir, exist_ok=True)
+
+                 with open(output, "w", encoding="utf-8") as f:
+                     json.dump(result, f, ensure_ascii=False, indent=2)
+
+                 console.print(
+                     f"[green]✓[/green] Response saved to: [bold]{output}[/bold]"
+                 )
+
+     except Exception as e:
+         console.print(f"[red]✗ Error: {str(e)}[/red]")
+         raise click.Abort()
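Note: the chat command above can be exercised in-process with Click's test runner; on an installed wheel the same options would be passed to the console script declared in entry_points.txt (script name not shown in this section). A minimal sketch, assuming valid credentials are configured:

    from click.testing import CliRunner
    from local_coze.cli.chat import chat

    runner = CliRunner()
    # Equivalent to running: <console-script> chat -p "Summarize this repo" --stream
    result = runner.invoke(chat, ["--prompt", "Summarize this repo", "--stream"])
    print(result.output)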
local_coze/cli/cli.py ADDED
@@ -0,0 +1,34 @@
+ import click
+
+ from .chat import chat
+ from .db import db
+ from .embedding import embedding
+ from .image import image
+ from .knowledge import knowledge
+ from .search import search
+ from .video import video
+ from .video_edit import video_edit
+ from .voice import asr, tts
+
+
+ @click.group()
+ @click.version_option(version="0.5.0", prog_name="coze-coding-ai")
+ def main():
+     """Coze Coding CLI - AI-powered tools for video generation, embedding, and more."""
+     pass
+
+
+ main.add_command(video)
+ main.add_command(video_edit)
+ main.add_command(image)
+ main.add_command(knowledge)
+ main.add_command(search)
+ main.add_command(tts)
+ main.add_command(asr)
+ main.add_command(chat)
+ main.add_command(db)
+ main.add_command(embedding)
+
+
+ if __name__ == "__main__":
+     main()
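Note: cli.py wires the individual commands into a single Click group, so one console script exposes all subcommands. A quick way to inspect the resulting command tree in-process (sketch only):

    from click.testing import CliRunner
    from local_coze.cli.cli import main

    # Prints the group help, listing the registered subcommands (chat, db, embedding, image, knowledge, search, tts, asr, video, video edit).
    print(CliRunner().invoke(main, ["--help"]).output)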
local_coze/cli/constants.py ADDED
@@ -0,0 +1,7 @@
+ """
+ CLI constant definitions.
+ """
+
+ RUN_MODE_HEADER = "x-run-mode"
+ RUN_MODE_TEST = "test_run"
+ RUN_MODE_PRODUCTION = "production"
local_coze/cli/db.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Database CLI commands.
+ """
+
+ import os
+ import click
+
+ # Default paths
+ DEFAULT_MODEL_OUTPUT = "src/storage/database/shared/model.py"
+ DEFAULT_MODEL_IMPORT_PATH = "storage.database.shared.model"
+ DEFAULT_MODEL_PATH = "src"
+
+
+ def _get_workspace_path() -> str:
+     """Get the workspace directory."""
+     return os.getenv("WORKSPACE_PATH", os.getcwd())
+
+
+ @click.group()
+ def db():
+     """Database management commands."""
+     pass
+
+
+ @db.command()
+ @click.argument("output_path", required=False, default=None)
+ @click.option("--verbose", "-v", is_flag=True, help="Verbose output")
+ def generate_models(output_path: str, verbose: bool):
+     """
+     Generate ORM models from database.
+
+     OUTPUT_PATH: Path to output model file (default: src/storage/database/shared/model.py)
+     """
+     from local_coze.database import generate_models as _generate_models
+
+     if output_path is None:
+         workspace = _get_workspace_path()
+         output_path = os.path.join(workspace, DEFAULT_MODEL_OUTPUT)
+
+     try:
+         _generate_models(output_path, verbose=verbose)
+         click.echo(f"Models generated at {output_path}")
+     except Exception as e:
+         click.echo(f"Error: {e}", err=True)
+         raise SystemExit(1)
+
+
+ @db.command()
+ @click.option(
+     "--model-import-path",
+     default=DEFAULT_MODEL_IMPORT_PATH,
+     help=f"Model import path (default: {DEFAULT_MODEL_IMPORT_PATH})",
+ )
+ @click.option(
+     "--model-path",
+     default=None,
+     help=f"Path to add to sys.path for model import (default: $WORKSPACE_PATH/{DEFAULT_MODEL_PATH})",
+ )
+ @click.option("--verbose", "-v", is_flag=True, help="Verbose output")
+ def upgrade(model_import_path: str, model_path: str, verbose: bool):
+     """
+     Run database migrations.
+
+     Automatically generates migration and upgrades to head.
+     """
+     from local_coze.database import upgrade as _upgrade
+
+     if model_path is None:
+         workspace = _get_workspace_path()
+         model_path = os.path.join(workspace, DEFAULT_MODEL_PATH)
+
+     try:
+         _upgrade(
+             model_import_path=model_import_path,
+             model_path=model_path,
+             verbose=verbose,
+         )
+         click.echo("Database upgraded successfully")
+     except Exception as e:
+         click.echo(f"Error: {e}", err=True)
+         raise SystemExit(1)
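Note: the db commands are thin wrappers over local_coze.database, so the same operations can be driven programmatically with the keyword arguments the CLI passes through. A sketch, assuming a configured database and the default model locations shown above:

    from local_coze.database import generate_models, upgrade

    # Reflect the current schema into an ORM module, then migrate to head,
    # mirroring the calls made by cli/db.py.
    generate_models("src/storage/database/shared/model.py", verbose=True)
    upgrade(
        model_import_path="storage.database.shared.model",
        model_path="src",
        verbose=True,
    )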
local_coze/cli/embedding.py ADDED
@@ -0,0 +1,193 @@
+ import json
+ import os
+ from typing import Optional
+
+ import click
+ from coze_coding_utils.runtime_ctx.context import new_context
+ from rich.console import Console
+
+ from ..core.config import Config
+ from ..embedding import EmbeddingClient
+ from .constants import RUN_MODE_HEADER, RUN_MODE_TEST
+
+ console = Console()
+
+
+ @click.command()
+ @click.option(
+     "--text",
+     "-t",
+     multiple=True,
+     help="Text to embed (can be used multiple times)",
+ )
+ @click.option(
+     "--image-url",
+     multiple=True,
+     help="Image URL to embed (can be used multiple times)",
+ )
+ @click.option(
+     "--video-url",
+     multiple=True,
+     help="Video URL to embed (can be used multiple times)",
+ )
+ @click.option(
+     "--dimensions",
+     "-d",
+     type=int,
+     help="Output embedding dimensions",
+ )
+ @click.option(
+     "--instructions",
+     help="Instructions for embedding generation",
+ )
+ @click.option(
+     "--multi-embedding",
+     is_flag=True,
+     help="Enable multi-embedding mode",
+ )
+ @click.option(
+     "--sparse-embedding",
+     is_flag=True,
+     help="Enable sparse embedding mode",
+ )
+ @click.option(
+     "--output",
+     "-o",
+     type=click.Path(),
+     help="Output file path for embedding JSON",
+ )
+ @click.option("--mock", is_flag=True, help="Use mock mode (test run)")
+ @click.option(
+     "--header",
+     "-H",
+     multiple=True,
+     help="Custom HTTP request header (format: 'Key: Value' or 'Key=Value'; may be used multiple times)",
+ )
+ @click.option("--verbose", "-v", is_flag=True, help="Show detailed HTTP request logs")
+ def embedding(
+     text: tuple,
+     image_url: tuple,
+     video_url: tuple,
+     dimensions: Optional[int],
+     instructions: Optional[str],
+     multi_embedding: bool,
+     sparse_embedding: bool,
+     output: Optional[str],
+     mock: bool,
+     header: tuple,
+     verbose: bool,
+ ):
+     """Generate embeddings for text, images, or videos using AI."""
+     try:
+         has_text = len(text) > 0
+         has_image = len(image_url) > 0
+         has_video = len(video_url) > 0
+
+         if not has_text and not has_image and not has_video:
+             console.print(
+                 "[red]Error: At least one of --text, --image-url, or --video-url is required[/red]"
+             )
+             raise click.Abort()
+
+         from .utils import parse_headers
+
+         config = Config()
+
+         ctx = None
+         custom_headers = parse_headers(header) or {}
+
+         if mock:
+             ctx = new_context(method="embedding.embed", headers=custom_headers)
+             custom_headers[RUN_MODE_HEADER] = RUN_MODE_TEST
+             console.print("[yellow]🧪 Mock mode enabled (test run)[/yellow]")
+
+         client = EmbeddingClient(
+             config, ctx=ctx, custom_headers=custom_headers, verbose=verbose
+         )
+
+         console.print("[bold cyan]Generating embeddings...[/bold cyan]")
+
+         if has_text:
+             console.print(f"Texts: [yellow]{len(text)} items[/yellow]")
+         if has_image:
+             console.print(f"Images: [yellow]{len(image_url)} items[/yellow]")
+         if has_video:
+             console.print(f"Videos: [yellow]{len(video_url)} items[/yellow]")
+
+         response = client.embed(
+             texts=list(text) if has_text else None,
+             image_urls=list(image_url) if has_image else None,
+             video_urls=list(video_url) if has_video else None,
+             dimensions=dimensions,
+             instructions=instructions,
+             multi_embedding=multi_embedding,
+             sparse_embedding=sparse_embedding,
+         )
+
+         console.print(f"[green]✓[/green] Embeddings generated successfully!")
+         console.print(f"Model: [cyan]{response.model}[/cyan]")
+
+         if response.embedding:
+             console.print(
+                 f"Embedding dimensions: [cyan]{len(response.embedding)}[/cyan]"
+             )
+             first_5 = response.embedding[:5]
+             console.print(f"First 5 values: [dim]{first_5}...[/dim]")
+
+         if response.multi_embeddings:
+             console.print(
+                 f"Multi-embedding vectors: [cyan]{len(response.multi_embeddings)}[/cyan]"
+             )
+
+         if response.sparse_embeddings:
+             console.print(
+                 f"Sparse embedding items: [cyan]{len(response.sparse_embeddings)}[/cyan]"
+             )
+
+         if response.usage:
+             console.print(f"Tokens used: [dim]{response.usage.total_tokens}[/dim]")
+
+         if output:
+             os.makedirs(os.path.dirname(os.path.abspath(output)), exist_ok=True)
+
+             output_data = {
+                 "object": response.object,
+                 "model": response.model,
+                 "id": response.id,
+                 "created": response.created,
+             }
+
+             if response.data:
+                 output_data["data"] = {
+                     "object": response.data.object,
+                     "index": response.data.index,
+                 }
+                 if response.data.embedding:
+                     output_data["data"]["embedding"] = response.data.embedding
+                 if response.data.multi_embedding:
+                     output_data["data"]["multi_embedding"] = response.data.multi_embedding
+                 if response.data.sparse_embedding:
+                     output_data["data"]["sparse_embedding"] = [
+                         {"index": item.index, "value": item.value}
+                         for item in response.data.sparse_embedding
+                     ]
+
+             if response.usage:
+                 output_data["usage"] = {
+                     "prompt_tokens": response.usage.prompt_tokens,
+                     "total_tokens": response.usage.total_tokens,
+                 }
+                 if response.usage.prompt_tokens_details:
+                     output_data["usage"]["prompt_tokens_details"] = {
+                         "image_tokens": response.usage.prompt_tokens_details.image_tokens,
+                         "text_tokens": response.usage.prompt_tokens_details.text_tokens,
+                     }
+
+             with open(output, "w", encoding="utf-8") as f:
+                 json.dump(output_data, f, indent=2, ensure_ascii=False)
+
+             console.print(f"[green]✓[/green] Embedding saved to: [bold]{output}[/bold]")
+
+     except Exception as e:
+         console.print(f"[red]✗ Error: {str(e)}[/red]")
+         raise click.Abort()
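Note: the embedding command delegates to EmbeddingClient.embed, so library users can skip the CLI. A minimal sketch based on the calls above; the client keyword arguments mirror the non-mock CLI path, and the dimensions value is an arbitrary example:

    from local_coze import Config, EmbeddingClient

    # Sketch only: ctx=None matches the non-mock path in cli/embedding.py.
    client = EmbeddingClient(Config(), ctx=None, custom_headers={}, verbose=False)
    response = client.embed(texts=["hello world"], dimensions=1024)
    print(response.model)
    if response.embedding:
        print(len(response.embedding), "dimensions")
    if response.usage:
        print(response.usage.total_tokens, "tokens")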