xparse-client 0.2.19__py3-none-any.whl → 0.3.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. example/1_basic_api_usage.py +198 -0
  2. example/2_async_job.py +210 -0
  3. example/3_local_workflow.py +300 -0
  4. example/4_advanced_workflow.py +327 -0
  5. example/README.md +128 -0
  6. example/config_example.json +95 -0
  7. tests/conftest.py +310 -0
  8. tests/unit/__init__.py +1 -0
  9. tests/unit/api/__init__.py +1 -0
  10. tests/unit/api/test_extract.py +232 -0
  11. tests/unit/api/test_local.py +231 -0
  12. tests/unit/api/test_parse.py +374 -0
  13. tests/unit/api/test_pipeline.py +369 -0
  14. tests/unit/api/test_workflows.py +108 -0
  15. tests/unit/connectors/test_ftp.py +525 -0
  16. tests/unit/connectors/test_local_connectors.py +324 -0
  17. tests/unit/connectors/test_milvus.py +368 -0
  18. tests/unit/connectors/test_qdrant.py +399 -0
  19. tests/unit/connectors/test_s3.py +598 -0
  20. tests/unit/connectors/test_smb.py +442 -0
  21. tests/unit/connectors/test_utils.py +335 -0
  22. tests/unit/models/test_local.py +54 -0
  23. tests/unit/models/test_pipeline_stages.py +144 -0
  24. tests/unit/models/test_workflows.py +55 -0
  25. tests/unit/test_base.py +437 -0
  26. tests/unit/test_client.py +110 -0
  27. tests/unit/test_config.py +160 -0
  28. tests/unit/test_exceptions.py +182 -0
  29. tests/unit/test_http.py +562 -0
  30. xparse_client/__init__.py +111 -20
  31. xparse_client/_base.py +179 -0
  32. xparse_client/_client.py +218 -0
  33. xparse_client/_config.py +221 -0
  34. xparse_client/_http.py +350 -0
  35. xparse_client/api/__init__.py +14 -0
  36. xparse_client/api/extract.py +109 -0
  37. xparse_client/api/local.py +215 -0
  38. xparse_client/api/parse.py +209 -0
  39. xparse_client/api/pipeline.py +134 -0
  40. xparse_client/api/workflows.py +204 -0
  41. xparse_client/connectors/__init__.py +45 -0
  42. xparse_client/connectors/_utils.py +138 -0
  43. xparse_client/connectors/destinations/__init__.py +45 -0
  44. xparse_client/connectors/destinations/base.py +116 -0
  45. xparse_client/connectors/destinations/local.py +91 -0
  46. xparse_client/connectors/destinations/milvus.py +229 -0
  47. xparse_client/connectors/destinations/qdrant.py +238 -0
  48. xparse_client/connectors/destinations/s3.py +163 -0
  49. xparse_client/connectors/sources/__init__.py +45 -0
  50. xparse_client/connectors/sources/base.py +74 -0
  51. xparse_client/connectors/sources/ftp.py +278 -0
  52. xparse_client/connectors/sources/local.py +176 -0
  53. xparse_client/connectors/sources/s3.py +232 -0
  54. xparse_client/connectors/sources/smb.py +259 -0
  55. xparse_client/exceptions.py +398 -0
  56. xparse_client/models/__init__.py +60 -0
  57. xparse_client/models/chunk.py +39 -0
  58. xparse_client/models/embed.py +62 -0
  59. xparse_client/models/extract.py +41 -0
  60. xparse_client/models/local.py +38 -0
  61. xparse_client/models/parse.py +136 -0
  62. xparse_client/models/pipeline.py +134 -0
  63. xparse_client/models/workflows.py +74 -0
  64. xparse_client-0.3.0b3.dist-info/METADATA +1075 -0
  65. xparse_client-0.3.0b3.dist-info/RECORD +68 -0
  66. {xparse_client-0.2.19.dist-info → xparse_client-0.3.0b3.dist-info}/WHEEL +1 -1
  67. {xparse_client-0.2.19.dist-info → xparse_client-0.3.0b3.dist-info}/licenses/LICENSE +1 -1
  68. {xparse_client-0.2.19.dist-info → xparse_client-0.3.0b3.dist-info}/top_level.txt +2 -0
  69. xparse_client/pipeline/__init__.py +0 -3
  70. xparse_client/pipeline/config.py +0 -129
  71. xparse_client/pipeline/destinations.py +0 -489
  72. xparse_client/pipeline/pipeline.py +0 -690
  73. xparse_client/pipeline/sources.py +0 -583
  74. xparse_client-0.2.19.dist-info/METADATA +0 -1050
  75. xparse_client-0.2.19.dist-info/RECORD +0 -11
@@ -0,0 +1,209 @@
1
+ """Parse API - 文档解析
2
+
3
+ 支持同步和异步两种解析模式。
4
+
5
+ Example:
6
+ >>> # 同步解析
7
+ >>> result = client.parse.partition(file=file_bytes, filename="doc.pdf")
8
+ >>>
9
+ >>> # 异步解析
10
+ >>> job = client.parse.create_async_job(file=file_bytes, filename="doc.pdf")
11
+ >>> result = client.parse.get_result(job_id=job.job_id)
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import time
18
+ from typing import TYPE_CHECKING
19
+
20
+ from .._base import BaseAPI
21
+ from ..exceptions import APIError, RequestTimeoutError
22
+ from ..models.parse import (
23
+ AsyncJobResponse,
24
+ JobStatusResponse,
25
+ ParseConfig,
26
+ ParseResponse,
27
+ )
28
+
29
+ if TYPE_CHECKING:
30
+ pass
31
+
32
+
33
class Parse(BaseAPI):
    """Parse API - document parsing.

    Provides document parsing in two modes.

    Synchronous mode:
        Returns the parse result directly; suited to small files or callers
        that need the result immediately.

    Asynchronous mode:
        Returns a job_id; the result is obtained by polling. Suited to
        large-file processing.

    Attributes:
        _base_path: API path prefix shared by all endpoints of this API.
    """

    _base_path = "/api/xparse"

    def partition(
        self,
        *,
        file: bytes,
        filename: str,
        config: ParseConfig | None = None,
    ) -> ParseResponse:
        """Parse a document synchronously.

        Args:
            file: File content as raw bytes.
            filename: File name (used to determine the file type).
            config: Optional parse configuration.

        Returns:
            ParseResponse: Parse result containing the list of elements.

        Raises:
            ValidationError: Argument validation failed.
            APIError: The API call failed.

        Example:
            >>> result = client.parse.partition(
            ...     file=file_bytes,
            ...     filename="document.pdf",
            ...     config=ParseConfig(provider="textin")
            ... )
            >>> for element in result.elements:
            ...     print(f"{element.type}: {element.text[:50]}")
        """
        files = {"file": (filename, file)}
        data: dict[str, str] = {}

        if config:
            # ensure_ascii=False keeps non-ASCII text readable in the payload.
            data["config"] = json.dumps(config.model_dump(), ensure_ascii=False)

        response = self._post("/parse/sync", files=files, data=data)
        return self._parse_response(response, ParseResponse)

    def create_async_job(
        self,
        *,
        file: bytes,
        filename: str,
        config: ParseConfig | None = None,
        webhook: str | None = None,
    ) -> AsyncJobResponse:
        """Create an asynchronous parse job.

        Creates an async job to process the document; suited to large files
        or batch-processing scenarios.

        Args:
            file: File content as raw bytes.
            filename: File name.
            config: Optional parse configuration.
            webhook: Optional callback URL invoked when the job completes.

        Returns:
            AsyncJobResponse: Contains the job_id.

        Example:
            >>> job = client.parse.create_async_job(
            ...     file=file_bytes,
            ...     filename="large_doc.pdf",
            ...     webhook="https://example.com/callback"
            ... )
            >>> print(f"任务已创建: {job.job_id}")
        """
        files = {"file": (filename, file)}
        data: dict[str, str] = {}

        if config:
            data["config"] = json.dumps(config.model_dump(), ensure_ascii=False)
        if webhook:
            data["webhook"] = webhook

        response = self._post("/parse/async", files=files, data=data)
        return self._parse_response(response, AsyncJobResponse)

    def get_result(self, *, job_id: str) -> JobStatusResponse:
        """Fetch the result of an asynchronous parse job.

        Queries the status and (if available) the result of an async job.

        Args:
            job_id: Job ID.

        Returns:
            JobStatusResponse: Job status and result.

        Example:
            >>> result = client.parse.get_result(job_id="job_abc123")
            >>> if result.is_completed:
            ...     print(f"解析完成,共 {len(result.elements)} 个元素")
            >>> elif result.is_failed:
            ...     print(f"解析失败: {result.error_message}")
        """
        response = self._get(f"/parse/async/{job_id}")
        return self._parse_response(response, JobStatusResponse)

    def wait_for_result(
        self,
        *,
        job_id: str,
        timeout_seconds: float = 3600,
        poll_interval_seconds: float = 5,
    ) -> JobStatusResponse:
        """Wait for an asynchronous job to finish.

        Polls the job status until it completes, fails, or the timeout is
        exceeded.

        Args:
            job_id: Job ID.
            timeout_seconds: Timeout in seconds (default 3600).
            poll_interval_seconds: Polling interval in seconds (default 5).

        Returns:
            JobStatusResponse: The completed job result.

        Raises:
            RequestTimeoutError: Waiting timed out.
            APIError: The job failed.

        Example:
            >>> job = client.parse.create_async_job(file=file_bytes, filename="doc.pdf")
            >>> result = client.parse.wait_for_result(
            ...     job_id=job.job_id,
            ...     timeout_seconds=600,
            ...     poll_interval_seconds=10
            ... )
        """
        # Use a monotonic clock: time.time() can jump backwards/forwards on
        # system clock adjustments (NTP), corrupting the elapsed measurement.
        start_time = time.monotonic()

        while True:
            result = self.get_result(job_id=job_id)

            if result.is_completed:
                return result

            if result.is_failed:
                raise APIError(
                    f"解析任务失败: {result.error_message}",
                    details={"job_id": job_id},
                )

            elapsed = time.monotonic() - start_time
            if elapsed > timeout_seconds:
                raise RequestTimeoutError(
                    f"等待解析任务超时: {job_id}",
                    timeout_seconds=timeout_seconds,
                )

            time.sleep(poll_interval_seconds)


__all__ = ["Parse"]
@@ -0,0 +1,134 @@
1
+ """Pipeline API - 自定义流水线
2
+
3
+ 支持自定义 stages 组合执行。
4
+
5
+ Example:
6
+ >>> stages = [
7
+ ... PipelineStage(type="parse", config={"provider": "textin"}),
8
+ ... PipelineStage(type="chunk", config={"strategy": "basic"}),
9
+ ... PipelineStage(type="embed", config={"provider": "qwen"})
10
+ ... ]
11
+ >>> result = client.pipeline.execute(
12
+ ... file=file_bytes,
13
+ ... filename="doc.pdf",
14
+ ... stages=stages
15
+ ... )
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import json
21
+ from typing import TYPE_CHECKING, Any
22
+
23
+ from .._base import BaseAPI
24
+ from ..models.pipeline import (
25
+ PipelineConfig,
26
+ PipelineResponse,
27
+ PipelineStage,
28
+ )
29
+
30
+ if TYPE_CHECKING:
31
+ pass
32
+
33
+
34
class PipelineAPI(BaseAPI):
    """Pipeline API - custom processing pipelines.

    Supports flexible stage combinations:
    - parse: document parsing (required; must be the first stage)
    - chunk: text chunking
    - embed: vectorization
    - extract: information extraction (may only follow parse directly)

    Example:
        >>> # RAG scenario: parse -> chunk -> embed
        >>> stages = [
        ...     PipelineStage(type="parse", config={"provider": "textin"}),
        ...     PipelineStage(type="chunk", config={"strategy": "basic", "max_characters": 1000}),
        ...     PipelineStage(type="embed", config={"provider": "qwen", "model": "text-embedding-v3"})
        ... ]
        >>> result = client.pipeline.execute(file=file_bytes, filename="doc.pdf", stages=stages)
        >>>
        >>> # Structured-extraction scenario: parse -> extract
        >>> stages = [
        ...     PipelineStage(type="parse"),
        ...     PipelineStage(type="extract", config={"schema": {...}})
        ... ]
        >>> result = client.pipeline.execute(file=file_bytes, filename="invoice.pdf", stages=stages)
    """

    _base_path = "/api/xparse"

    def execute(
        self,
        *,
        file: bytes,
        filename: str,
        stages: list[PipelineStage | dict[str, Any]],
        config: PipelineConfig | None = None,
        data_source: dict[str, Any] | None = None,
    ) -> PipelineResponse:
        """Execute a pipeline synchronously.

        Args:
            file: File content as raw bytes.
            filename: File name.
            stages: Stage configuration list (PipelineStage objects or dicts).
            config: Optional global pipeline configuration.
            data_source: Optional data-source information (used for tracking).

        Returns:
            PipelineResponse: Execution result.

        Example:
            >>> # Using PipelineStage objects
            >>> stages = [
            ...     PipelineStage(type="parse", config={"provider": "textin"}),
            ...     PipelineStage(type="chunk", config={"strategy": "basic"}),
            ...     PipelineStage(type="embed", config={"provider": "qwen"})
            ... ]
            >>>
            >>> # Or plain dicts
            >>> stages = [
            ...     {"type": "parse", "config": {"provider": "textin"}},
            ...     {"type": "chunk", "config": {"strategy": "basic"}},
            ...     {"type": "embed", "config": {"provider": "qwen"}}
            ... ]
            >>>
            >>> result = client.pipeline.execute(
            ...     file=file_bytes,
            ...     filename="doc.pdf",
            ...     stages=stages
            ... )
            >>> print(f"处理了 {len(result.elements)} 个元素")
        """
        files = {"file": (filename, file)}

        # Convert stages to a JSON-serializable form. Pydantic models
        # (ParseStage, ChunkStage, ...) are dumped with by_alias=True so that
        # the schema_ field serializes as "schema"; dicts pass through as-is.
        stages_data = [
            stage.model_dump(by_alias=True) if hasattr(stage, "model_dump") else stage
            for stage in stages
        ]

        data: dict[str, str] = {
            "stages": json.dumps(stages_data, ensure_ascii=False)
        }

        if config:
            # Serialize only the fields the API needs
            # (intermediate_results_destination is a client-side concern).
            config_dict = config.model_dump(exclude={"intermediate_results_destination"})
            data["config"] = json.dumps(config_dict, ensure_ascii=False)
        if data_source:
            data["data_source"] = json.dumps(data_source, ensure_ascii=False)

        response = self._post("/pipeline", files=files, data=data)
        return self._parse_response(response, PipelineResponse)


__all__ = ["PipelineAPI"]
@@ -0,0 +1,204 @@
1
+ """Workflows API - 远程工作流管理
2
+
3
+ 管理服务端工作流资源,支持创建、列出、更新、删除和运行。
4
+
5
+ 与 local.run_workflow 的区别:
6
+ - workflows: 远程管理,在服务端执行,支持 cron 定时,source/destination 使用 ID
7
+ - local: 本地执行,同步阻塞,source/destination 使用本地对象
8
+
9
+ Example:
10
+ >>> from xparse_client import XParseClient
11
+ >>> from xparse_client.models import ParseStage, ParseConfig, Schedule
12
+ >>>
13
+ >>> client = XParseClient(app_id="...", secret_code="...")
14
+ >>>
15
+ >>> # 创建工作流(目前骨架实现,未连接真实 API)
16
+ >>> workflow = client.workflows.create(
17
+ ... name="daily-processing",
18
+ ... source_id="src_123",
19
+ ... destination_id="dst_456",
20
+ ... stages=[ParseStage(config=ParseConfig())],
21
+ ... schedule=Schedule(cron="0 0 * * *")
22
+ ... )
23
+ """
24
+
25
+ from __future__ import annotations
26
+
27
+ import builtins
28
+ from typing import TYPE_CHECKING
29
+
30
+ from .._base import BaseAPI
31
+ from ..models.pipeline import PipelineStage
32
+ from ..models.workflows import Schedule, WorkflowInformation, WorkflowState
33
+
34
+ if TYPE_CHECKING:
35
+ pass
36
+
37
+
38
class Workflows(BaseAPI):
    """Workflows API - remote workflow management.

    Manages server-side workflows: create, query, update, delete and
    manual triggering.

    Note: this is currently a skeleton implementation; every method raises
    NotImplementedError.

    Attributes:
        _base_path: API path prefix.
    """

    _base_path = "/api/xparse"

    def create(
        self,
        *,
        name: str,
        source_id: str,
        destination_id: str,
        stages: builtins.list[PipelineStage],
        schedule: Schedule | None = None,
    ) -> WorkflowInformation:
        """Create a workflow.

        Registers a remote workflow configuration on the server side.

        Args:
            name: Workflow name.
            source_id: Remote source ID (created via the sources API).
            destination_id: Remote destination ID (created via the
                destinations API).
            stages: List of processing stages.
            schedule: Optional schedule; omit for manual triggering only.

        Returns:
            WorkflowInformation: Information about the created workflow.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> workflow = client.workflows.create(
            ...     name="daily-docs",
            ...     source_id="src_s3_123",
            ...     destination_id="dst_milvus_456",
            ...     stages=[ParseStage(config=ParseConfig())],
            ...     schedule=Schedule(cron="0 0 * * *")
            ... )
        """
        raise NotImplementedError("Workflows.create 尚未实现")

    def list(
        self,
        *,
        state: WorkflowState | None = None,
        limit: int = 100,
        offset: int = 0,
    ) -> builtins.list[WorkflowInformation]:
        """List workflows.

        Queries all workflows, optionally filtered by state.

        Args:
            state: Optional state filter.
            limit: Maximum number of results.
            offset: Result offset.

        Returns:
            List[WorkflowInformation]: The workflow list.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> workflows = client.workflows.list(state=WorkflowState.ACTIVE)
        """
        raise NotImplementedError("Workflows.list 尚未实现")

    def get(self, *, workflow_id: str) -> WorkflowInformation:
        """Get workflow details.

        Args:
            workflow_id: Workflow ID.

        Returns:
            WorkflowInformation: The workflow information.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> workflow = client.workflows.get(workflow_id="wf_123")
        """
        raise NotImplementedError("Workflows.get 尚未实现")

    def update(
        self,
        *,
        workflow_id: str,
        name: str | None = None,
        stages: builtins.list[PipelineStage] | None = None,
        schedule: Schedule | None = None,
        state: WorkflowState | None = None,
    ) -> WorkflowInformation:
        """Update a workflow.

        Updates the workflow configuration.

        Args:
            workflow_id: Workflow ID.
            name: Optional new name.
            stages: Optional new processing stages.
            schedule: Optional new schedule.
            state: Optional new state (e.g. to pause/resume).

        Returns:
            WorkflowInformation: The updated workflow information.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> workflow = client.workflows.update(
            ...     workflow_id="wf_123",
            ...     state=WorkflowState.PAUSED
            ... )
        """
        raise NotImplementedError("Workflows.update 尚未实现")

    def delete(self, *, workflow_id: str) -> bool:
        """Delete a workflow.

        Args:
            workflow_id: Workflow ID.

        Returns:
            bool: Whether the deletion succeeded.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> client.workflows.delete(workflow_id="wf_123")
        """
        raise NotImplementedError("Workflows.delete 尚未实现")

    def run(self, *, workflow_id: str) -> str:
        """Trigger a workflow run manually.

        Executes the workflow once immediately, without waiting for the
        scheduled time.

        Args:
            workflow_id: Workflow ID.

        Returns:
            str: The execution task ID.

        Raises:
            NotImplementedError: Skeleton implementation; not yet wired to a
                real API.

        Example:
            >>> task_id = client.workflows.run(workflow_id="wf_123")
            >>> print(f"任务已创建: {task_id}")
        """
        raise NotImplementedError("Workflows.run 尚未实现")


__all__ = ["Workflows"]
@@ -0,0 +1,45 @@
1
"""Connector module.

Provides the ability to read files from a variety of data sources and to
write results to a variety of destinations.

Sources:
    - LocalSource: local file system
    - S3Source: S3/MinIO object storage
    - FtpSource: FTP servers
    - SmbSource: SMB/CIFS shares

Destinations:
    - LocalDestination: local file system
    - S3Destination: S3/MinIO object storage
    - MilvusDestination: Milvus/Zilliz vector database
    - QdrantDestination: Qdrant vector database

Example:
    >>> from xparse_client.connectors import LocalSource, LocalDestination
    >>> source = LocalSource(directory="./docs", pattern=["*.pdf"])
    >>> dest = LocalDestination(output_dir="./output")
"""

from .destinations import (
    Destination,
    LocalDestination,
    MilvusDestination,
    QdrantDestination,
    S3Destination,
)
from .sources import FtpSource, LocalSource, S3Source, SmbSource, Source

__all__ = [
    # Sources
    "Source",
    "LocalSource",
    "S3Source",
    "FtpSource",
    "SmbSource",
    # Destinations
    "Destination",
    "LocalDestination",
    "S3Destination",
    "MilvusDestination",
    "QdrantDestination",
]