@clickzetta/cz-cli-darwin-x64 0.3.17 → 0.3.18
- package/bin/cz-cli +0 -0
- package/bin/skills/clickzetta-batch-sync-pipeline/SKILL.md +386 -0
- package/bin/skills/clickzetta-cdc-sync-pipeline/SKILL.md +548 -0
- package/bin/skills/clickzetta-data-ingest-pipeline/SKILL.md +220 -0
- package/bin/skills/clickzetta-data-ingest-pipeline/eval_cases.jsonl +5 -0
- package/bin/skills/clickzetta-dynamic-table/SKILL.md +112 -0
- package/bin/skills/clickzetta-dynamic-table/best-practices/dimension-table-join-guide.md +257 -0
- package/bin/skills/clickzetta-dynamic-table/best-practices/medallion-and-stream-patterns.md +124 -0
- package/bin/skills/clickzetta-dynamic-table/best-practices/non-partitioned-merge-into-warning.md +96 -0
- package/bin/skills/clickzetta-dynamic-table/best-practices/performance-optimization.md +109 -0
- package/bin/skills/clickzetta-file-import-pipeline/SKILL.md +156 -0
- package/bin/skills/clickzetta-kafka-ingest-pipeline/SKILL.md +751 -0
- package/bin/skills/clickzetta-kafka-ingest-pipeline/eval_cases.jsonl +5 -0
- package/bin/skills/clickzetta-kafka-ingest-pipeline/references/kafka-pipe-syntax.md +324 -0
- package/bin/skills/clickzetta-oss-ingest-pipeline/SKILL.md +537 -0
- package/bin/skills/clickzetta-query-optimizer/SKILL.md +156 -0
- package/bin/skills/clickzetta-query-optimizer/references/explain.md +56 -0
- package/bin/skills/clickzetta-query-optimizer/references/hints-and-sortkey.md +78 -0
- package/bin/skills/clickzetta-query-optimizer/references/optimize.md +65 -0
- package/bin/skills/clickzetta-query-optimizer/references/result-cache.md +49 -0
- package/bin/skills/clickzetta-query-optimizer/references/show-jobs.md +42 -0
- package/bin/skills/clickzetta-realtime-sync-pipeline/SKILL.md +276 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/SKILL.md +379 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/evals/evals.json +166 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/references/dynamic-table.md +185 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/references/materialized-view.md +129 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/references/pipe.md +222 -0
- package/bin/skills/clickzetta-sql-pipeline-manager/references/table-stream.md +125 -0
- package/bin/skills/clickzetta-table-stream-pipeline/SKILL.md +206 -0
- package/bin/skills/clickzetta-vcluster-manager/SKILL.md +212 -0
- package/bin/skills/clickzetta-vcluster-manager/references/vc-cache.md +54 -0
- package/bin/skills/clickzetta-vcluster-manager/references/vcluster-ddl.md +150 -0
- package/bin/skills/clickzetta-volume-manager/SKILL.md +292 -0
- package/bin/skills/clickzetta-volume-manager/references/volume-ddl.md +199 -0
- package/package.json +1 -1
- /package/bin/skills/{dt-creator → clickzetta-dynamic-table/dt-creator}/SKILL.md +0 -0
- /package/bin/skills/{dt-creator → clickzetta-dynamic-table/dt-creator}/references/dt-declaration-strategy.md +0 -0
- /package/bin/skills/{dt-creator → clickzetta-dynamic-table/dt-creator}/references/incremental-config-reference.md +0 -0
- /package/bin/skills/{dt-creator → clickzetta-dynamic-table/dt-creator}/references/refresh-history-guide.md +0 -0
- /package/bin/skills/{dt-creator → clickzetta-dynamic-table/dt-creator}/references/sql-limitations.md +0 -0
- /package/bin/skills/{dynamic-table-alter → clickzetta-dynamic-table/dynamic-table-alter}/SKILL.md +0 -0
package/bin/skills/clickzetta-kafka-ingest-pipeline/eval_cases.jsonl
@@ -0,0 +1,5 @@
{"case_id":"001","type":"should_call","user_input":"How do I use a READ_KAFKA Pipe to continuously ingest Kafka topic data into the Lakehouse?","expected_skill":"clickzetta-kafka-ingest-pipeline","expected_output_contains":["READ_KAFKA","PIPE"]}
{"case_id":"002","type":"should_call","user_input":"My Kafka messages are deeply nested JSON; how do I parse and ingest them?","expected_skill":"clickzetta-kafka-ingest-pipeline","expected_output_contains":["JSON","READ_KAFKA"]}
{"case_id":"003","type":"should_call","user_input":"How do I troubleshoot high Kafka Pipe latency, and how do I monitor offsetLag?","expected_skill":"clickzetta-kafka-ingest-pipeline","expected_output_contains":["pipe_latency","offsetLag"]}
{"case_id":"004","type":"should_call","user_input":"My Kafka cluster requires SASL authentication; how do I configure the Pipe?","expected_skill":"clickzetta-kafka-ingest-pipeline","expected_output_contains":["SASL"]}
{"case_id":"005","type":"should_call","user_input":"What is the difference between a Kafka external table and a READ_KAFKA Pipe, and which should I choose?","expected_skill":"clickzetta-kafka-ingest-pipeline","expected_output_contains":["外部表","READ_KAFKA","Table Stream"]}
package/bin/skills/clickzetta-kafka-ingest-pipeline/references/kafka-pipe-syntax.md
@@ -0,0 +1,324 @@
# Kafka Pipe SQL Syntax Reference

> Sources: https://www.yunqi.tech/documents/pipe-kafka and https://www.yunqi.tech/documents/pipe-kafka-bestpractice-1

> **⚠️ ClickZetta READ_KAFKA takes positional parameters**
> - ❌ Named-argument syntax with `=>` is not supported (e.g. `KAFKA_BROKER => 'host:port'`)
> - ❌ Wrapping the call in `TABLE(READ_KAFKA(...))` is not supported
> - ✅ Correct: `FROM read_kafka('broker', 'topic', '', 'group', '', '', '', '', 'raw', 'raw', 0, MAP(...))`

## CREATE PIPE (READ_KAFKA approach)

```sql
CREATE [ OR REPLACE ] PIPE <pipe_name>
  VIRTUAL_CLUSTER = '<vcluster_name>'
  [ BATCH_INTERVAL_IN_SECONDS = '<seconds>' ]
  [ BATCH_SIZE_PER_KAFKA_PARTITION = '<count>' ]
  [ MAX_SKIP_BATCH_COUNT_ON_ERROR = '<count>' ]
  [ INITIAL_DELAY_IN_SECONDS = '<seconds>' ]
  [ RESET_KAFKA_GROUP_OFFSETS = '<offset_value>' ]
  [ COPY_JOB_HINT = '<json>' ]
AS
COPY INTO <target_table> FROM (
  SELECT <expr> [, ...]
  FROM read_kafka(
    '<bootstrap_servers>',  -- position 1: Kafka cluster address (required)
    '<topic_name>',         -- position 2: topic name (required)
    '',                     -- position 3: topic pattern (reserved; pass an empty string)
    '<group_id>',           -- position 4: consumer group ID (required)
    '',                     -- position 5: starting_offsets (leave empty in a Pipe)
    '',                     -- position 6: ending_offsets (leave empty in a Pipe)
    '',                     -- position 7: starting_timestamp (leave empty in a Pipe)
    '',                     -- position 8: ending_timestamp (leave empty in a Pipe)
    'raw',                  -- position 9: key format (currently only raw)
    'raw',                  -- position 10: value format (currently only raw)
    0,                      -- position 11: max_errors
    MAP(<kafka_config>)     -- position 12: Kafka configuration parameters
  )
);
```

### Pipe parameters

| Parameter | Required | Default | Description |
|-----------|----------|---------|-------------|
| `VIRTUAL_CLUSTER` | Yes | — | Compute cluster that runs the Pipe jobs |
| `BATCH_INTERVAL_IN_SECONDS` | No | 60 | Batch interval in seconds, i.e. the data freshness |
| `BATCH_SIZE_PER_KAFKA_PARTITION` | No | 500000 | Maximum messages per batch per Kafka partition |
| `MAX_SKIP_BATCH_COUNT_ON_ERROR` | No | 30 | Maximum retries before a failing batch is skipped |
| `INITIAL_DELAY_IN_SECONDS` | No | 0 | Delay before the first job is scheduled |
| `RESET_KAFKA_GROUP_OFFSETS` | No | — | Consumer offset at startup (takes effect only at creation) |
| `COPY_JOB_HINT` | No | — | Job parameters in JSON format |

### RESET_KAFKA_GROUP_OFFSETS values

| Value | Description |
|-------|-------------|
| `'none'` | No action; use Kafka `auto.offset.reset` (defaults to latest) |
| `'valid'` | Check whether the current offsets have expired and reset expired partitions to earliest |
| `'earliest'` | Reset to the earliest offset |
| `'latest'` | Reset to the latest offset |
| `'<ms timestamp>'` | Reset to the offset at the given timestamp (e.g. `'1737789688000'`) |
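For instance, `'valid'` is useful when a Pipe's saved offsets may have expired on the broker: it resets only the expired partitions. A minimal sketch, with hypothetical pipe, cluster, table, and topic names:

```sql
-- Hypothetical names throughout. 'valid' resets only partitions whose
-- saved offsets have already expired on the broker (see table above).
CREATE PIPE kafka_orders_pipe_v2
  VIRTUAL_CLUSTER = 'default'
  RESET_KAFKA_GROUP_OFFSETS = 'valid'
AS
COPY INTO ods.orders FROM (
  SELECT parse_json(value::string)['order_id']::STRING AS order_id
  FROM read_kafka('kafka.example.com:9092', 'orders', '', 'lakehouse_orders',
                  '', '', '', '', 'raw', 'raw', 0,
                  MAP('kafka.security.protocol', 'PLAINTEXT'))
);
```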
### READ_KAFKA parameters (inside a Pipe vs. standalone)

| Aspect | Standalone read_kafka | Inside a Pipe |
|--------|----------------------|---------------|
| Consumer group | Temporary; destroyed once the query finishes | Persistent; keeps the consumption position |
| Offset management | Set `kafka.auto.offset.reset` in the MAP | Managed automatically by the Pipe; the offset parameters **must be left empty** |
| Execution | One-off query | Continuously scheduled |
| Default starting position | latest (can be changed to earliest in the MAP) | latest (controlled by RESET_KAFKA_GROUP_OFFSETS) |

### MAP configuration parameters

| Parameter | Description |
|-----------|-------------|
| `kafka.security.protocol` | Security protocol: `PLAINTEXT` or `SASL_PLAINTEXT` |
| `kafka.sasl.mechanism` | SASL mechanism: `PLAIN` |
| `kafka.sasl.username` | SASL username |
| `kafka.sasl.password` | SASL password |
| `kafka.auto.offset.reset` | Starting offset for standalone probing (`earliest` / `latest`) |
| `cz.kafka.fetch.retry.enable` | Enable fetch retries (`true`/`false`) |
| `cz.kafka.fetch.retry.times` | Number of retries |
| `cz.kafka.fetch.retry.intervalMs` | Retry interval in milliseconds |
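The MAP is a flat list of alternating keys and values passed as the 12th positional argument, so the parameters above combine freely. A sketch mixing SASL authentication with the fetch-retry settings (the retry values are illustrative, not recommendations):

```sql
-- Passed as argument 12 of read_kafka; illustrative values only:
-- retries a failed fetch 3 times, 1 second apart, over SASL.
MAP(
  'kafka.security.protocol', 'SASL_PLAINTEXT',
  'kafka.sasl.mechanism', 'PLAIN',
  'kafka.sasl.username', 'my_user',
  'kafka.sasl.password', 'my_password',
  'cz.kafka.fetch.retry.enable', 'true',
  'cz.kafka.fetch.retry.times', '3',
  'cz.kafka.fetch.retry.intervalMs', '1000'
)
```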
### JSON field extraction syntax

```sql
-- key and value are BINARY; cast them first
value::string                                      -- cast to string
parse_json(value::string)                          -- parse into a JSON object
parse_json(value::string)['field']::TYPE           -- extract a top-level field
parse_json(value::string)['nested']['key']::TYPE   -- extract a nested field

-- Recommended pattern: parse_json once in a subquery, then use j['field'] outside
SELECT j['order_id']::STRING, j['amount']::DECIMAL(10,2)
FROM (
  SELECT parse_json(value::string) AS j
  FROM read_kafka(...)
)
```

### Complete examples

```sql
-- Kafka Pipe without authentication
CREATE PIPE kafka_orders_pipe
  VIRTUAL_CLUSTER = 'default'
  BATCH_INTERVAL_IN_SECONDS = '60'
AS
COPY INTO ods.orders FROM (
  SELECT
    j['order_id']::STRING AS order_id,
    j['user_id']::STRING AS user_id,
    j['amount']::DECIMAL(10,2) AS amount,
    CAST(`timestamp` AS TIMESTAMP) AS kafka_ts
  FROM (
    SELECT `timestamp`, parse_json(value::string) AS j
    FROM read_kafka(
      'kafka.example.com:9092',
      'orders',
      '',
      'lakehouse_orders',
      '', '', '', '',
      'raw', 'raw', 0,
      MAP('kafka.security.protocol', 'PLAINTEXT')
    )
  )
);

-- SASL authentication + consumption starting from a specific timestamp
CREATE PIPE kafka_secure_pipe
  VIRTUAL_CLUSTER = 'pipe_vc'
  BATCH_INTERVAL_IN_SECONDS = '60'
  RESET_KAFKA_GROUP_OFFSETS = '1737789688000'
AS
COPY INTO ods.secure_events FROM (
  SELECT
    j['id']::STRING AS event_id,
    j['payload']::STRING AS payload,
    CAST(`timestamp` AS TIMESTAMP) AS kafka_ts
  FROM (
    SELECT `timestamp`, parse_json(value::string) AS j
    FROM read_kafka(
      'kafka.example.com:9092',
      'secure_events',
      '',
      'cz_secure',
      '', '', '', '',
      'raw', 'raw', 0,
      MAP(
        'kafka.security.protocol', 'SASL_PLAINTEXT',
        'kafka.sasl.mechanism', 'PLAIN',
        'kafka.sasl.username', 'my_user',
        'kafka.sasl.password', 'my_password'
      )
    )
  )
);
```

---

## Standalone probing (verify connectivity and data format)

```sql
-- Without authentication
SELECT value::string
FROM read_kafka(
  'kafka.example.com:9092',
  'orders',
  '',
  'test_explore',
  '', '', '', '',
  'raw', 'raw', 0,
  MAP('kafka.security.protocol', 'PLAINTEXT', 'kafka.auto.offset.reset', 'earliest')
)
LIMIT 10;

-- With SASL authentication
SELECT value::string
FROM read_kafka(
  'kafka.example.com:9092',
  'orders',
  '',
  'test_explore',
  '', '', '', '',
  'raw', 'raw', 0,
  MAP(
    'kafka.security.protocol', 'SASL_PLAINTEXT',
    'kafka.sasl.mechanism', 'PLAIN',
    'kafka.sasl.username', 'my_user',
    'kafka.sasl.password', 'my_password',
    'kafka.auto.offset.reset', 'earliest'
  )
)
LIMIT 10;
```

---

## CREATE PIPE (Kafka external table + Table Stream approach)

### Step 1: Create a Kafka Storage Connection

```sql
CREATE STORAGE CONNECTION IF NOT EXISTS <conn_name>
  TYPE KAFKA
  BOOTSTRAP_SERVERS = ['<host1>:<port1>', '<host2>:<port2>']
  SECURITY_PROTOCOL = 'PLAINTEXT';
```

### Step 2: Create a Kafka external table

```sql
-- ⚠️ Column definitions must be given explicitly (they cannot be omitted)
-- ⚠️ offset is a reserved word and must be escaped with backticks
CREATE EXTERNAL TABLE <ext_table_name> (
  topic STRING,
  partition INT,
  `offset` BIGINT,
  `timestamp` TIMESTAMP,
  timestamp_type STRING,
  headers STRING,
  key BINARY,
  value BINARY
)
USING KAFKA
OPTIONS (
  'group_id' = '<consumer_group>',
  'topics' = '<topic_name>',
  'starting_offset' = '<earliest | latest>'
)
CONNECTION <conn_name>;
```

> **Notes**:
> - Column definitions are **required**; omitting them fails with `failed to detect columns`
> - `offset` and `timestamp` are reserved words and need backtick escaping
> - Drop an external table with `DROP TABLE` (not `DROP EXTERNAL TABLE`)

### Step 3: Create a Table Stream

```sql
CREATE TABLE STREAM <stream_name>
  ON TABLE <ext_table_name>
  WITH PROPERTIES ('TABLE_STREAM_MODE' = 'APPEND_ONLY');
```

### Step 4: Create the Pipe

```sql
CREATE PIPE <pipe_name>
  VIRTUAL_CLUSTER = '<vcluster_name>'
  BATCH_INTERVAL_IN_SECONDS = '60'
AS
COPY INTO <target_table>
SELECT <expr> [, ...]
FROM <stream_name>;
```
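Putting the four steps together: a minimal end-to-end sketch, assuming hypothetical connection, table, stream, pipe, topic, and broker names, and assuming the stream exposes the external table's columns to the Pipe's SELECT:

```sql
-- Step 1: connection (all names below are made up for illustration)
CREATE STORAGE CONNECTION IF NOT EXISTS kafka_conn
  TYPE KAFKA
  BOOTSTRAP_SERVERS = ['kafka.example.com:9092']
  SECURITY_PROTOCOL = 'PLAINTEXT';

-- Step 2: external table over the topic
CREATE EXTERNAL TABLE events_ext (
  topic STRING, partition INT, `offset` BIGINT, `timestamp` TIMESTAMP,
  timestamp_type STRING, headers STRING, key BINARY, value BINARY
)
USING KAFKA
OPTIONS ('group_id' = 'cz_events', 'topics' = 'events', 'starting_offset' = 'earliest')
CONNECTION kafka_conn;

-- Step 3: append-only stream over the external table
CREATE TABLE STREAM events_stream
  ON TABLE events_ext
  WITH PROPERTIES ('TABLE_STREAM_MODE' = 'APPEND_ONLY');

-- Step 4: pipe that drains the stream into the target table
CREATE PIPE events_pipe
  VIRTUAL_CLUSTER = 'default'
  BATCH_INTERVAL_IN_SECONDS = '60'
AS
COPY INTO ods.events
SELECT parse_json(value::string)['id']::STRING AS event_id,
       CAST(`timestamp` AS TIMESTAMP) AS kafka_ts
FROM events_stream;
```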
---

## ALTER PIPE

```sql
-- Pause
ALTER PIPE <pipe_name> SET PIPE_EXECUTION_PAUSED = true;

-- Resume
ALTER PIPE <pipe_name> SET PIPE_EXECUTION_PAUSED = false;

-- Change the virtual cluster
ALTER PIPE <pipe_name> SET VIRTUAL_CLUSTER = 'new_vc';

-- Change COPY_JOB_HINT
ALTER PIPE <pipe_name> SET COPY_JOB_HINT = '{"cz.sql.split.kafka.strategy":"size","cz.mapper.kafka.message.size":"200000"}';
```

> ⚠️ **Properties supported by ALTER PIPE**:
> - ✅ `PIPE_EXECUTION_PAUSED`
> - ✅ `VIRTUAL_CLUSTER`
> - ✅ `COPY_JOB_HINT`
> - ❌ `BATCH_INTERVAL_IN_SECONDS` (not supported; drop and recreate the Pipe)
> - ❌ `BATCH_SIZE_PER_KAFKA_PARTITION` (not supported; drop and recreate the Pipe)
>
> The COPY/INSERT statement logic cannot be modified either; drop the Pipe and recreate it.
> Setting `COPY_JOB_HINT` replaces all existing hints, so include every parameter in a single statement.
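So changing a batch parameter means a drop-and-recreate. A sketch with hypothetical names; reusing the same consumer group ID should let the new Pipe pick up from the committed offsets, assuming the group's offsets are still retained on the broker:

```sql
-- Hypothetical names; changes BATCH_INTERVAL_IN_SECONDS from 60 to 30.
ALTER PIPE kafka_orders_pipe SET PIPE_EXECUTION_PAUSED = true;
DROP PIPE kafka_orders_pipe;
CREATE PIPE kafka_orders_pipe
  VIRTUAL_CLUSTER = 'default'
  BATCH_INTERVAL_IN_SECONDS = '30'
AS
COPY INTO ods.orders FROM (
  SELECT parse_json(value::string)['order_id']::STRING AS order_id
  FROM read_kafka('kafka.example.com:9092', 'orders', '',
                  'lakehouse_orders',  -- same group ID keeps the committed offsets
                  '', '', '', '', 'raw', 'raw', 0,
                  MAP('kafka.security.protocol', 'PLAINTEXT'))
);
```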
---

## Monitoring

```sql
-- Show Pipe details (latency is reported as pipe_latency)
DESC PIPE EXTENDED <pipe_name>;

-- List all Pipes
SHOW PIPES;

-- Inspect the load history
SELECT * FROM load_history('<schema>.<table>')
ORDER BY last_load_time DESC LIMIT 20;

-- Find Pipe jobs by query_tag
-- Tag format: pipe.<workspace_name>.<schema_name>.<pipe_name>
SHOW JOBS WHERE query_tag = 'pipe.my_workspace.ods.kafka_orders_pipe';
```

---

## DROP PIPE

```sql
DROP PIPE [ IF EXISTS ] <pipe_name>;
```

## References

- [Pipe overview](https://www.yunqi.tech/documents/pipe-summary)
- [Continuous ingestion with the read_kafka function](https://www.yunqi.tech/documents/pipe-kafka)
- [Continuous ingestion via Kafka external tables and Table Streams](https://www.yunqi.tech/documents/pipe-kafka-table-stream)
- [Best practice: ingesting Kafka data efficiently with Pipe](https://www.yunqi.tech/documents/pipe-kafka-bestpractice-1)
- [Kafka external tables](https://www.yunqi.tech/documents/kafka-external-table)
- [Kafka Storage Connection](https://www.yunqi.tech/documents/Kafka_connection)