kafka-light-python 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kafka_light_python-0.1.0/.gitignore +34 -0
- kafka_light_python-0.1.0/LICENSE +21 -0
- kafka_light_python-0.1.0/PKG-INFO +86 -0
- kafka_light_python-0.1.0/README.md +60 -0
- kafka_light_python-0.1.0/pyproject.toml +56 -0
- kafka_light_python-0.1.0/src/kafka_light/__init__.py +24 -0
- kafka_light_python-0.1.0/src/kafka_light/_generated/__init__.py +3 -0
- kafka_light_python-0.1.0/src/kafka_light/_generated/kafka_light_pb2.py +75 -0
- kafka_light_python-0.1.0/src/kafka_light/_generated/kafka_light_pb2_grpc.py +435 -0
- kafka_light_python-0.1.0/src/kafka_light/client.py +404 -0
- kafka_light_python-0.1.0/src/kafka_light/config.py +10 -0
- kafka_light_python-0.1.0/src/kafka_light/interfaces.py +49 -0
- kafka_light_python-0.1.0/src/kafka_light/models.py +29 -0
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
.env
|
|
2
|
+
.cache
|
|
3
|
+
*.egg-info
|
|
4
|
+
.python-version
|
|
5
|
+
__pycache__
|
|
6
|
+
|
|
7
|
+
# tmp dir
|
|
8
|
+
tmp/
|
|
9
|
+
|
|
10
|
+
# Generated SDK packages
|
|
11
|
+
sdk/
|
|
12
|
+
scenario/*/sdk/
|
|
13
|
+
|
|
14
|
+
# Metadata database
|
|
15
|
+
paladin_metadata.db
|
|
16
|
+
|
|
17
|
+
.claude
|
|
18
|
+
|
|
19
|
+
# CLAUDE.md files - use README.md for component documentation
|
|
20
|
+
CLAUDE.md
|
|
21
|
+
**/CLAUDE.md
|
|
22
|
+
|
|
23
|
+
# Local config (user-specific)
|
|
24
|
+
paladin.toml
|
|
25
|
+
|
|
26
|
+
# Pipeline data (uploaded packages)
|
|
27
|
+
system/apps/api/data/
|
|
28
|
+
|
|
29
|
+
# Runtime workbench directories (staged/deployed bundles)
|
|
30
|
+
.staged/
|
|
31
|
+
.deploy/
|
|
32
|
+
|
|
33
|
+
# Rust build artifacts
|
|
34
|
+
target/
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Youngseok Choi
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: kafka-light-python
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Local Python SDK for kafka-light
|
|
5
|
+
Project-URL: Homepage, https://github.com/paladin-pipeline/paladin-pipeline
|
|
6
|
+
Project-URL: Repository, https://github.com/paladin-pipeline/paladin-pipeline
|
|
7
|
+
Author: Youngseok Choi
|
|
8
|
+
License: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: grpc,kafka,sdk,streaming
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
16
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
17
|
+
Requires-Python: >=3.13
|
|
18
|
+
Requires-Dist: grpcio>=1.78.0
|
|
19
|
+
Requires-Dist: protobuf>=6.31.1
|
|
20
|
+
Provides-Extra: dev
|
|
21
|
+
Requires-Dist: pyright>=1.1.407; extra == 'dev'
|
|
22
|
+
Requires-Dist: pytest-asyncio>=1.3.0; extra == 'dev'
|
|
23
|
+
Requires-Dist: pytest>=9.0.0; extra == 'dev'
|
|
24
|
+
Requires-Dist: ruff>=0.14.0; extra == 'dev'
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# kafka-light-python
|
|
28
|
+
|
|
29
|
+
`kafka-light-python` is a small Python SDK for producing and consuming events through the `kafka-light` gRPC transport.
|
|
30
|
+
|
|
31
|
+
## Requirements
|
|
32
|
+
|
|
33
|
+
- Python `3.13+`
|
|
34
|
+
- A reachable `kafka-light` server endpoint
|
|
35
|
+
|
|
36
|
+
## Installation
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
pip install kafka-light-python
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## Quick Start
|
|
43
|
+
|
|
44
|
+
```python
|
|
45
|
+
import asyncio
|
|
46
|
+
|
|
47
|
+
from kafka_light import KafkaLightConfig, KafkaLightConsumer, KafkaLightProducer, TopicPartition
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
async def main() -> None:
|
|
51
|
+
config = KafkaLightConfig(brokers=["127.0.0.1:7171"])
|
|
52
|
+
|
|
53
|
+
producer = KafkaLightProducer(config)
|
|
54
|
+
await producer.start()
|
|
55
|
+
await producer.send_and_wait("events", value={"message": "hello"})
|
|
56
|
+
await producer.stop()
|
|
57
|
+
|
|
58
|
+
consumer = KafkaLightConsumer(config, group_id="example-group", topics=["events"])
|
|
59
|
+
await consumer.start()
|
|
60
|
+
|
|
61
|
+
message = await consumer.__anext__()
|
|
62
|
+
print(message.value)
|
|
63
|
+
|
|
64
|
+
await consumer.commit({TopicPartition("events"): message.offset + 1})
|
|
65
|
+
await consumer.stop()
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
asyncio.run(main())
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
## Public API
|
|
72
|
+
|
|
73
|
+
- `KafkaLightConfig`: broker endpoint configuration
|
|
74
|
+
- `KafkaLightProducer`: async producer adapter
|
|
75
|
+
- `KafkaLightConsumer`: async consumer adapter
|
|
76
|
+
- `PublishAck`, `ReceivedMessage`, `TopicPartition`: shared transport models
|
|
77
|
+
|
|
78
|
+
## Notes
|
|
79
|
+
|
|
80
|
+
- The current client uses only the first configured broker endpoint.
|
|
81
|
+
- Consumer support is currently limited to partition `0`.
|
|
82
|
+
- `auto_offset_reset` currently supports committed offsets or `"earliest"`.
|
|
83
|
+
|
|
84
|
+
## License
|
|
85
|
+
|
|
86
|
+
MIT
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
# kafka-light-python
|
|
2
|
+
|
|
3
|
+
`kafka-light-python` is a small Python SDK for producing and consuming events through the `kafka-light` gRPC transport.
|
|
4
|
+
|
|
5
|
+
## Requirements
|
|
6
|
+
|
|
7
|
+
- Python `3.13+`
|
|
8
|
+
- A reachable `kafka-light` server endpoint
|
|
9
|
+
|
|
10
|
+
## Installation
|
|
11
|
+
|
|
12
|
+
```bash
|
|
13
|
+
pip install kafka-light-python
|
|
14
|
+
```
|
|
15
|
+
|
|
16
|
+
## Quick Start
|
|
17
|
+
|
|
18
|
+
```python
|
|
19
|
+
import asyncio
|
|
20
|
+
|
|
21
|
+
from kafka_light import KafkaLightConfig, KafkaLightConsumer, KafkaLightProducer, TopicPartition
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
async def main() -> None:
|
|
25
|
+
config = KafkaLightConfig(brokers=["127.0.0.1:7171"])
|
|
26
|
+
|
|
27
|
+
producer = KafkaLightProducer(config)
|
|
28
|
+
await producer.start()
|
|
29
|
+
await producer.send_and_wait("events", value={"message": "hello"})
|
|
30
|
+
await producer.stop()
|
|
31
|
+
|
|
32
|
+
consumer = KafkaLightConsumer(config, group_id="example-group", topics=["events"])
|
|
33
|
+
await consumer.start()
|
|
34
|
+
|
|
35
|
+
message = await consumer.__anext__()
|
|
36
|
+
print(message.value)
|
|
37
|
+
|
|
38
|
+
await consumer.commit({TopicPartition("events"): message.offset + 1})
|
|
39
|
+
await consumer.stop()
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
asyncio.run(main())
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Public API
|
|
46
|
+
|
|
47
|
+
- `KafkaLightConfig`: broker endpoint configuration
|
|
48
|
+
- `KafkaLightProducer`: async producer adapter
|
|
49
|
+
- `KafkaLightConsumer`: async consumer adapter
|
|
50
|
+
- `PublishAck`, `ReceivedMessage`, `TopicPartition`: shared transport models
|
|
51
|
+
|
|
52
|
+
## Notes
|
|
53
|
+
|
|
54
|
+
- The current client uses only the first configured broker endpoint.
|
|
55
|
+
- Consumer support is currently limited to partition `0`.
|
|
56
|
+
- `auto_offset_reset` currently supports committed offsets or `"earliest"`.
|
|
57
|
+
|
|
58
|
+
## License
|
|
59
|
+
|
|
60
|
+
MIT
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "kafka-light-python"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Local Python SDK for kafka-light"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.13"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
authors = [{ name = "Youngseok Choi" }]
|
|
13
|
+
keywords = ["grpc", "kafka", "sdk", "streaming"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.13",
|
|
20
|
+
"Topic :: Software Development :: Libraries",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"grpcio>=1.78.0",
|
|
24
|
+
"protobuf>=6.31.1",
|
|
25
|
+
]
|
|
26
|
+
|
|
27
|
+
[project.urls]
|
|
28
|
+
Homepage = "https://github.com/paladin-pipeline/paladin-pipeline"
|
|
29
|
+
Repository = "https://github.com/paladin-pipeline/paladin-pipeline"
|
|
30
|
+
|
|
31
|
+
[tool.hatch.build.targets.wheel]
|
|
32
|
+
packages = ["src/kafka_light"]
|
|
33
|
+
|
|
34
|
+
[project.optional-dependencies]
|
|
35
|
+
dev = [
|
|
36
|
+
"pytest>=9.0.0",
|
|
37
|
+
"pytest-asyncio>=1.3.0",
|
|
38
|
+
"ruff>=0.14.0",
|
|
39
|
+
"pyright>=1.1.407",
|
|
40
|
+
]
|
|
41
|
+
|
|
42
|
+
[tool.ruff]
|
|
43
|
+
line-length = 100
|
|
44
|
+
|
|
45
|
+
[tool.ruff.lint]
|
|
46
|
+
select = ["E", "F", "I", "N", "W", "UP", "RUF"]
|
|
47
|
+
|
|
48
|
+
[tool.ruff.lint.per-file-ignores]
|
|
49
|
+
"src/kafka_light/_generated/*.py" = ["E501", "N802"]
|
|
50
|
+
|
|
51
|
+
[tool.pyright]
|
|
52
|
+
pythonVersion = "3.13"
|
|
53
|
+
venvPath = "."
|
|
54
|
+
venv = ".venv"
|
|
55
|
+
extraPaths = ["src"]
|
|
56
|
+
exclude = [".venv"]
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"""Local kafka-light Python SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from .client import KafkaLightConsumerAdapter, KafkaLightProducerAdapter, _target
|
|
6
|
+
from .config import KafkaLightConfig
|
|
7
|
+
from .interfaces import EventConsumerRebalanceListener
|
|
8
|
+
from .models import PublishAck, ReceivedMessage, TopicPartition
|
|
9
|
+
|
|
10
|
+
KafkaLightConsumer = KafkaLightConsumerAdapter
|
|
11
|
+
KafkaLightProducer = KafkaLightProducerAdapter
|
|
12
|
+
|
|
13
|
+
__all__ = [
|
|
14
|
+
"EventConsumerRebalanceListener",
|
|
15
|
+
"KafkaLightConfig",
|
|
16
|
+
"KafkaLightConsumer",
|
|
17
|
+
"KafkaLightConsumerAdapter",
|
|
18
|
+
"KafkaLightProducer",
|
|
19
|
+
"KafkaLightProducerAdapter",
|
|
20
|
+
"PublishAck",
|
|
21
|
+
"ReceivedMessage",
|
|
22
|
+
"TopicPartition",
|
|
23
|
+
"_target",
|
|
24
|
+
]
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
|
3
|
+
# NO CHECKED-IN PROTOBUF GENCODE
|
|
4
|
+
# source: kafka_light.proto
|
|
5
|
+
# Protobuf Python Version: 6.31.1
|
|
6
|
+
"""Generated protocol buffer code."""
|
|
7
|
+
|
|
8
|
+
from google.protobuf import descriptor as _descriptor
|
|
9
|
+
from google.protobuf import descriptor_pool as _descriptor_pool
|
|
10
|
+
from google.protobuf import runtime_version as _runtime_version
|
|
11
|
+
from google.protobuf import symbol_database as _symbol_database
|
|
12
|
+
from google.protobuf.internal import builder as _builder
|
|
13
|
+
|
|
14
|
+
_runtime_version.ValidateProtobufRuntimeVersion(
|
|
15
|
+
_runtime_version.Domain.PUBLIC, 6, 31, 1, "", "kafka_light.proto"
|
|
16
|
+
)
|
|
17
|
+
# @@protoc_insertion_point(imports)
|
|
18
|
+
|
|
19
|
+
_sym_db = _symbol_database.Default()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
|
|
23
|
+
b'\n\x11kafka_light.proto\x12\x0bkafka_light"$\n\x06Header\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c"\x82\x01\n\nNewMessage\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x14\n\x0cpayload_json\x18\x03 \x01(\x0c\x12$\n\x07headers\x18\x04 \x03(\x0b\x32\x13.kafka_light.Header\x12\x1c\n\x14published_at_unix_ms\x18\x05 \x01(\x03"[\n\tAppendAck\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpartition\x18\x02 \x01(\x05\x12\x0e\n\x06offset\x18\x03 \x01(\x04\x12\x1c\n\x14published_at_unix_ms\x18\x04 \x01(\x03"\xa8\x01\n\rStoredMessage\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpartition\x18\x02 \x01(\x05\x12\x0e\n\x06offset\x18\x03 \x01(\x04\x12\x0b\n\x03key\x18\x04 \x01(\t\x12\x14\n\x0cpayload_json\x18\x05 \x01(\x0c\x12$\n\x07headers\x18\x06 \x03(\x0b\x32\x13.kafka_light.Header\x12\x1c\n\x14published_at_unix_ms\x18\x07 \x01(\x03"C\n\nAssignment\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x11\n\tpartition\x18\x02 \x01(\x05\x12\x13\n\x0bnext_offset\x18\x03 \x01(\x04"\x91\x01\n\x10JoinGroupRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\x11\n\tmember_id\x18\x02 \x01(\t\x12\x0e\n\x06topics\x18\x03 \x03(\t\x12\x11\n\tpartition\x18\x04 \x01(\x05\x12\x1a\n\x12session_timeout_ms\x18\x05 \x01(\r\x12\x19\n\x11joined_at_unix_ms\x18\x06 \x01(\x03"U\n\x11JoinGroupResponse\x12\x12\n\ngeneration\x18\x01 \x01(\x04\x12,\n\x0b\x61ssignments\x18\x02 \x03(\x0b\x32\x17.kafka_light.Assignment"i\n\x10HeartbeatRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\x11\n\tmember_id\x18\x02 \x01(\t\x12\x12\n\ngeneration\x18\x03 \x01(\x04\x12\x1c\n\x14heartbeat_at_unix_ms\x18\x04 \x01(\x03"=\n\x11HeartbeatResponse\x12\x12\n\ngeneration\x18\x01 \x01(\x04\x12\x14\n\x0cneeds_rejoin\x18\x02 \x01(\x08"\xa6\x01\n\x13PollMessagesRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\x11\n\tmember_id\x18\x02 \x01(\t\x12\x12\n\ngeneration\x18\x03 \x01(\x04\x12\r\n\x05topic\x18\x04 \x01(\t\x12\x11\n\tpartition\x18\x05 \x01(\x05\x12\x14\n\x0cstart_offset\x18\x06 
\x01(\x04\x12\r\n\x05limit\x18\x07 \x01(\r\x12\x0f\n\x07wait_ms\x18\x08 \x01(\r"D\n\x14PollMessagesResponse\x12,\n\x08messages\x18\x01 \x03(\x0b\x32\x1a.kafka_light.StoredMessage"\xa2\x01\n\x14\x43ommitOffsetsRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\x11\n\tmember_id\x18\x02 \x01(\t\x12\x12\n\ngeneration\x18\x03 \x01(\x04\x12\r\n\x05topic\x18\x04 \x01(\t\x12\x11\n\tpartition\x18\x05 \x01(\x05\x12\x13\n\x0bnext_offset\x18\x06 \x01(\x04\x12\x1a\n\x12updated_at_unix_ms\x18\x07 \x01(\x03"a\n\x15\x43ommitOffsetsResponse\x12\x11\n\tcommitted\x18\x01 \x01(\x08\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x11\n\tpartition\x18\x03 \x01(\x05\x12\x13\n\x0bnext_offset\x18\x04 \x01(\x04"8\n\x11LeaveGroupRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\x11\n\tmember_id\x18\x02 \x01(\t""\n\x12LeaveGroupResponse\x12\x0c\n\x04left\x18\x01 \x01(\x08"O\n\x19GetCommittedOffsetRequest\x12\x10\n\x08group_id\x18\x01 \x01(\t\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x11\n\tpartition\x18\x03 \x01(\x05"w\n\x1aGetCommittedOffsetResponse\x12\r\n\x05\x66ound\x18\x01 \x01(\x08\x12\r\n\x05topic\x18\x02 \x01(\t\x12\x11\n\tpartition\x18\x03 \x01(\x05\x12\x18\n\x0bnext_offset\x18\x04 \x01(\x04H\x00\x88\x01\x01\x42\x0e\n\x0c_next_offset"I\n\x13ReadMessagesRequest\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x14\n\x0cstart_offset\x18\x02 \x01(\x04\x12\r\n\x05limit\x18\x03 \x01(\r"D\n\x14ReadMessagesResponse\x12,\n\x08messages\x18\x01 \x03(\x0b\x32\x1a.kafka_light.StoredMessage"\x14\n\x12HealthCheckRequest"U\n\x13HealthCheckResponse\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x17\n\x0fstorage_backend\x18\x02 \x01(\t\x12\x15\n\rdatabase_path\x18\x03 \x01(\t2\x90\x05\n\nKafkaLight\x12K\n\x06Health\x12\x1f.kafka_light.HealthCheckRequest\x1a 
.kafka_light.HealthCheckResponse\x12:\n\x07Publish\x12\x17.kafka_light.NewMessage\x1a\x16.kafka_light.AppendAck\x12J\n\tJoinGroup\x12\x1d.kafka_light.JoinGroupRequest\x1a\x1e.kafka_light.JoinGroupResponse\x12J\n\tHeartbeat\x12\x1d.kafka_light.HeartbeatRequest\x1a\x1e.kafka_light.HeartbeatResponse\x12S\n\x0cPollMessages\x12 .kafka_light.PollMessagesRequest\x1a!.kafka_light.PollMessagesResponse\x12V\n\rCommitOffsets\x12!.kafka_light.CommitOffsetsRequest\x1a".kafka_light.CommitOffsetsResponse\x12M\n\nLeaveGroup\x12\x1e.kafka_light.LeaveGroupRequest\x1a\x1f.kafka_light.LeaveGroupResponse\x12\x65\n\x12GetCommittedOffset\x12&.kafka_light.GetCommittedOffsetRequest\x1a\'.kafka_light.GetCommittedOffsetResponseb\x06proto3'
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
_globals = globals()
|
|
27
|
+
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
|
28
|
+
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "kafka_light_pb2", _globals)
|
|
29
|
+
if not _descriptor._USE_C_DESCRIPTORS:
|
|
30
|
+
DESCRIPTOR._loaded_options = None
|
|
31
|
+
_globals["_HEADER"]._serialized_start = 34
|
|
32
|
+
_globals["_HEADER"]._serialized_end = 70
|
|
33
|
+
_globals["_NEWMESSAGE"]._serialized_start = 73
|
|
34
|
+
_globals["_NEWMESSAGE"]._serialized_end = 203
|
|
35
|
+
_globals["_APPENDACK"]._serialized_start = 205
|
|
36
|
+
_globals["_APPENDACK"]._serialized_end = 296
|
|
37
|
+
_globals["_STOREDMESSAGE"]._serialized_start = 299
|
|
38
|
+
_globals["_STOREDMESSAGE"]._serialized_end = 467
|
|
39
|
+
_globals["_ASSIGNMENT"]._serialized_start = 469
|
|
40
|
+
_globals["_ASSIGNMENT"]._serialized_end = 536
|
|
41
|
+
_globals["_JOINGROUPREQUEST"]._serialized_start = 539
|
|
42
|
+
_globals["_JOINGROUPREQUEST"]._serialized_end = 684
|
|
43
|
+
_globals["_JOINGROUPRESPONSE"]._serialized_start = 686
|
|
44
|
+
_globals["_JOINGROUPRESPONSE"]._serialized_end = 771
|
|
45
|
+
_globals["_HEARTBEATREQUEST"]._serialized_start = 773
|
|
46
|
+
_globals["_HEARTBEATREQUEST"]._serialized_end = 878
|
|
47
|
+
_globals["_HEARTBEATRESPONSE"]._serialized_start = 880
|
|
48
|
+
_globals["_HEARTBEATRESPONSE"]._serialized_end = 941
|
|
49
|
+
_globals["_POLLMESSAGESREQUEST"]._serialized_start = 944
|
|
50
|
+
_globals["_POLLMESSAGESREQUEST"]._serialized_end = 1110
|
|
51
|
+
_globals["_POLLMESSAGESRESPONSE"]._serialized_start = 1112
|
|
52
|
+
_globals["_POLLMESSAGESRESPONSE"]._serialized_end = 1180
|
|
53
|
+
_globals["_COMMITOFFSETSREQUEST"]._serialized_start = 1183
|
|
54
|
+
_globals["_COMMITOFFSETSREQUEST"]._serialized_end = 1345
|
|
55
|
+
_globals["_COMMITOFFSETSRESPONSE"]._serialized_start = 1347
|
|
56
|
+
_globals["_COMMITOFFSETSRESPONSE"]._serialized_end = 1444
|
|
57
|
+
_globals["_LEAVEGROUPREQUEST"]._serialized_start = 1446
|
|
58
|
+
_globals["_LEAVEGROUPREQUEST"]._serialized_end = 1502
|
|
59
|
+
_globals["_LEAVEGROUPRESPONSE"]._serialized_start = 1504
|
|
60
|
+
_globals["_LEAVEGROUPRESPONSE"]._serialized_end = 1538
|
|
61
|
+
_globals["_GETCOMMITTEDOFFSETREQUEST"]._serialized_start = 1540
|
|
62
|
+
_globals["_GETCOMMITTEDOFFSETREQUEST"]._serialized_end = 1619
|
|
63
|
+
_globals["_GETCOMMITTEDOFFSETRESPONSE"]._serialized_start = 1621
|
|
64
|
+
_globals["_GETCOMMITTEDOFFSETRESPONSE"]._serialized_end = 1740
|
|
65
|
+
_globals["_READMESSAGESREQUEST"]._serialized_start = 1742
|
|
66
|
+
_globals["_READMESSAGESREQUEST"]._serialized_end = 1815
|
|
67
|
+
_globals["_READMESSAGESRESPONSE"]._serialized_start = 1817
|
|
68
|
+
_globals["_READMESSAGESRESPONSE"]._serialized_end = 1885
|
|
69
|
+
_globals["_HEALTHCHECKREQUEST"]._serialized_start = 1887
|
|
70
|
+
_globals["_HEALTHCHECKREQUEST"]._serialized_end = 1907
|
|
71
|
+
_globals["_HEALTHCHECKRESPONSE"]._serialized_start = 1909
|
|
72
|
+
_globals["_HEALTHCHECKRESPONSE"]._serialized_end = 1994
|
|
73
|
+
_globals["_KAFKALIGHT"]._serialized_start = 1997
|
|
74
|
+
_globals["_KAFKALIGHT"]._serialized_end = 2653
|
|
75
|
+
# @@protoc_insertion_point(module_scope)
|
|
@@ -0,0 +1,435 @@
|
|
|
1
|
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
|
2
|
+
"""Client and server classes corresponding to protobuf-defined services."""
|
|
3
|
+
|
|
4
|
+
import grpc
|
|
5
|
+
import warnings
|
|
6
|
+
|
|
7
|
+
from . import kafka_light_pb2 as kafka__light__pb2
|
|
8
|
+
|
|
9
|
+
GRPC_GENERATED_VERSION = "1.80.0"
|
|
10
|
+
GRPC_VERSION = grpc.__version__
|
|
11
|
+
_version_not_supported = False
|
|
12
|
+
|
|
13
|
+
try:
|
|
14
|
+
from grpc._utilities import first_version_is_lower
|
|
15
|
+
|
|
16
|
+
_version_not_supported = first_version_is_lower(
|
|
17
|
+
GRPC_VERSION, GRPC_GENERATED_VERSION
|
|
18
|
+
)
|
|
19
|
+
except ImportError:
|
|
20
|
+
_version_not_supported = True
|
|
21
|
+
|
|
22
|
+
if _version_not_supported:
|
|
23
|
+
raise RuntimeError(
|
|
24
|
+
f"The grpc package installed is at version {GRPC_VERSION},"
|
|
25
|
+
+ " but the generated code in kafka_light_pb2_grpc.py depends on"
|
|
26
|
+
+ f" grpcio>={GRPC_GENERATED_VERSION}."
|
|
27
|
+
+ f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
|
|
28
|
+
+ f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class KafkaLightStub(object):
|
|
33
|
+
"""Missing associated documentation comment in .proto file."""
|
|
34
|
+
|
|
35
|
+
def __init__(self, channel):
|
|
36
|
+
"""Constructor.
|
|
37
|
+
|
|
38
|
+
Args:
|
|
39
|
+
channel: A grpc.Channel.
|
|
40
|
+
"""
|
|
41
|
+
self.Health = channel.unary_unary(
|
|
42
|
+
"/kafka_light.KafkaLight/Health",
|
|
43
|
+
request_serializer=kafka__light__pb2.HealthCheckRequest.SerializeToString,
|
|
44
|
+
response_deserializer=kafka__light__pb2.HealthCheckResponse.FromString,
|
|
45
|
+
_registered_method=True,
|
|
46
|
+
)
|
|
47
|
+
self.Publish = channel.unary_unary(
|
|
48
|
+
"/kafka_light.KafkaLight/Publish",
|
|
49
|
+
request_serializer=kafka__light__pb2.NewMessage.SerializeToString,
|
|
50
|
+
response_deserializer=kafka__light__pb2.AppendAck.FromString,
|
|
51
|
+
_registered_method=True,
|
|
52
|
+
)
|
|
53
|
+
self.JoinGroup = channel.unary_unary(
|
|
54
|
+
"/kafka_light.KafkaLight/JoinGroup",
|
|
55
|
+
request_serializer=kafka__light__pb2.JoinGroupRequest.SerializeToString,
|
|
56
|
+
response_deserializer=kafka__light__pb2.JoinGroupResponse.FromString,
|
|
57
|
+
_registered_method=True,
|
|
58
|
+
)
|
|
59
|
+
self.Heartbeat = channel.unary_unary(
|
|
60
|
+
"/kafka_light.KafkaLight/Heartbeat",
|
|
61
|
+
request_serializer=kafka__light__pb2.HeartbeatRequest.SerializeToString,
|
|
62
|
+
response_deserializer=kafka__light__pb2.HeartbeatResponse.FromString,
|
|
63
|
+
_registered_method=True,
|
|
64
|
+
)
|
|
65
|
+
self.PollMessages = channel.unary_unary(
|
|
66
|
+
"/kafka_light.KafkaLight/PollMessages",
|
|
67
|
+
request_serializer=kafka__light__pb2.PollMessagesRequest.SerializeToString,
|
|
68
|
+
response_deserializer=kafka__light__pb2.PollMessagesResponse.FromString,
|
|
69
|
+
_registered_method=True,
|
|
70
|
+
)
|
|
71
|
+
self.CommitOffsets = channel.unary_unary(
|
|
72
|
+
"/kafka_light.KafkaLight/CommitOffsets",
|
|
73
|
+
request_serializer=kafka__light__pb2.CommitOffsetsRequest.SerializeToString,
|
|
74
|
+
response_deserializer=kafka__light__pb2.CommitOffsetsResponse.FromString,
|
|
75
|
+
_registered_method=True,
|
|
76
|
+
)
|
|
77
|
+
self.LeaveGroup = channel.unary_unary(
|
|
78
|
+
"/kafka_light.KafkaLight/LeaveGroup",
|
|
79
|
+
request_serializer=kafka__light__pb2.LeaveGroupRequest.SerializeToString,
|
|
80
|
+
response_deserializer=kafka__light__pb2.LeaveGroupResponse.FromString,
|
|
81
|
+
_registered_method=True,
|
|
82
|
+
)
|
|
83
|
+
self.GetCommittedOffset = channel.unary_unary(
|
|
84
|
+
"/kafka_light.KafkaLight/GetCommittedOffset",
|
|
85
|
+
request_serializer=kafka__light__pb2.GetCommittedOffsetRequest.SerializeToString,
|
|
86
|
+
response_deserializer=kafka__light__pb2.GetCommittedOffsetResponse.FromString,
|
|
87
|
+
_registered_method=True,
|
|
88
|
+
)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class KafkaLightServicer(object):
|
|
92
|
+
"""Missing associated documentation comment in .proto file."""
|
|
93
|
+
|
|
94
|
+
def Health(self, request, context):
|
|
95
|
+
"""Missing associated documentation comment in .proto file."""
|
|
96
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
97
|
+
context.set_details("Method not implemented!")
|
|
98
|
+
raise NotImplementedError("Method not implemented!")
|
|
99
|
+
|
|
100
|
+
def Publish(self, request, context):
|
|
101
|
+
"""Missing associated documentation comment in .proto file."""
|
|
102
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
103
|
+
context.set_details("Method not implemented!")
|
|
104
|
+
raise NotImplementedError("Method not implemented!")
|
|
105
|
+
|
|
106
|
+
def JoinGroup(self, request, context):
|
|
107
|
+
"""Missing associated documentation comment in .proto file."""
|
|
108
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
109
|
+
context.set_details("Method not implemented!")
|
|
110
|
+
raise NotImplementedError("Method not implemented!")
|
|
111
|
+
|
|
112
|
+
def Heartbeat(self, request, context):
|
|
113
|
+
"""Missing associated documentation comment in .proto file."""
|
|
114
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
115
|
+
context.set_details("Method not implemented!")
|
|
116
|
+
raise NotImplementedError("Method not implemented!")
|
|
117
|
+
|
|
118
|
+
def PollMessages(self, request, context):
|
|
119
|
+
"""Missing associated documentation comment in .proto file."""
|
|
120
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
121
|
+
context.set_details("Method not implemented!")
|
|
122
|
+
raise NotImplementedError("Method not implemented!")
|
|
123
|
+
|
|
124
|
+
def CommitOffsets(self, request, context):
|
|
125
|
+
"""Missing associated documentation comment in .proto file."""
|
|
126
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
127
|
+
context.set_details("Method not implemented!")
|
|
128
|
+
raise NotImplementedError("Method not implemented!")
|
|
129
|
+
|
|
130
|
+
def LeaveGroup(self, request, context):
|
|
131
|
+
"""Missing associated documentation comment in .proto file."""
|
|
132
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
133
|
+
context.set_details("Method not implemented!")
|
|
134
|
+
raise NotImplementedError("Method not implemented!")
|
|
135
|
+
|
|
136
|
+
def GetCommittedOffset(self, request, context):
|
|
137
|
+
"""Missing associated documentation comment in .proto file."""
|
|
138
|
+
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
|
139
|
+
context.set_details("Method not implemented!")
|
|
140
|
+
raise NotImplementedError("Method not implemented!")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def add_KafkaLightServicer_to_server(servicer, server):
|
|
144
|
+
rpc_method_handlers = {
|
|
145
|
+
"Health": grpc.unary_unary_rpc_method_handler(
|
|
146
|
+
servicer.Health,
|
|
147
|
+
request_deserializer=kafka__light__pb2.HealthCheckRequest.FromString,
|
|
148
|
+
response_serializer=kafka__light__pb2.HealthCheckResponse.SerializeToString,
|
|
149
|
+
),
|
|
150
|
+
"Publish": grpc.unary_unary_rpc_method_handler(
|
|
151
|
+
servicer.Publish,
|
|
152
|
+
request_deserializer=kafka__light__pb2.NewMessage.FromString,
|
|
153
|
+
response_serializer=kafka__light__pb2.AppendAck.SerializeToString,
|
|
154
|
+
),
|
|
155
|
+
"JoinGroup": grpc.unary_unary_rpc_method_handler(
|
|
156
|
+
servicer.JoinGroup,
|
|
157
|
+
request_deserializer=kafka__light__pb2.JoinGroupRequest.FromString,
|
|
158
|
+
response_serializer=kafka__light__pb2.JoinGroupResponse.SerializeToString,
|
|
159
|
+
),
|
|
160
|
+
"Heartbeat": grpc.unary_unary_rpc_method_handler(
|
|
161
|
+
servicer.Heartbeat,
|
|
162
|
+
request_deserializer=kafka__light__pb2.HeartbeatRequest.FromString,
|
|
163
|
+
response_serializer=kafka__light__pb2.HeartbeatResponse.SerializeToString,
|
|
164
|
+
),
|
|
165
|
+
"PollMessages": grpc.unary_unary_rpc_method_handler(
|
|
166
|
+
servicer.PollMessages,
|
|
167
|
+
request_deserializer=kafka__light__pb2.PollMessagesRequest.FromString,
|
|
168
|
+
response_serializer=kafka__light__pb2.PollMessagesResponse.SerializeToString,
|
|
169
|
+
),
|
|
170
|
+
"CommitOffsets": grpc.unary_unary_rpc_method_handler(
|
|
171
|
+
servicer.CommitOffsets,
|
|
172
|
+
request_deserializer=kafka__light__pb2.CommitOffsetsRequest.FromString,
|
|
173
|
+
response_serializer=kafka__light__pb2.CommitOffsetsResponse.SerializeToString,
|
|
174
|
+
),
|
|
175
|
+
"LeaveGroup": grpc.unary_unary_rpc_method_handler(
|
|
176
|
+
servicer.LeaveGroup,
|
|
177
|
+
request_deserializer=kafka__light__pb2.LeaveGroupRequest.FromString,
|
|
178
|
+
response_serializer=kafka__light__pb2.LeaveGroupResponse.SerializeToString,
|
|
179
|
+
),
|
|
180
|
+
"GetCommittedOffset": grpc.unary_unary_rpc_method_handler(
|
|
181
|
+
servicer.GetCommittedOffset,
|
|
182
|
+
request_deserializer=kafka__light__pb2.GetCommittedOffsetRequest.FromString,
|
|
183
|
+
response_serializer=kafka__light__pb2.GetCommittedOffsetResponse.SerializeToString,
|
|
184
|
+
),
|
|
185
|
+
}
|
|
186
|
+
generic_handler = grpc.method_handlers_generic_handler(
|
|
187
|
+
"kafka_light.KafkaLight", rpc_method_handlers
|
|
188
|
+
)
|
|
189
|
+
server.add_generic_rpc_handlers((generic_handler,))
|
|
190
|
+
server.add_registered_method_handlers("kafka_light.KafkaLight", rpc_method_handlers)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# This class is part of an EXPERIMENTAL API.
# NOTE: auto-generated gRPC code (grpcio-tools / protoc output). Do not edit by
# hand; regenerate from the kafka_light .proto definition instead. Each static
# method below issues a one-shot unary RPC against *target* without requiring a
# pre-built channel or stub.
class KafkaLight(object):
    """Missing associated documentation comment in .proto file."""

    # One-shot /kafka_light.KafkaLight/Health RPC (HealthCheckRequest -> HealthCheckResponse).
    @staticmethod
    def Health(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/Health",
            kafka__light__pb2.HealthCheckRequest.SerializeToString,
            kafka__light__pb2.HealthCheckResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/Publish RPC (NewMessage -> AppendAck).
    @staticmethod
    def Publish(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/Publish",
            kafka__light__pb2.NewMessage.SerializeToString,
            kafka__light__pb2.AppendAck.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/JoinGroup RPC (JoinGroupRequest -> JoinGroupResponse).
    @staticmethod
    def JoinGroup(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/JoinGroup",
            kafka__light__pb2.JoinGroupRequest.SerializeToString,
            kafka__light__pb2.JoinGroupResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/Heartbeat RPC (HeartbeatRequest -> HeartbeatResponse).
    @staticmethod
    def Heartbeat(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/Heartbeat",
            kafka__light__pb2.HeartbeatRequest.SerializeToString,
            kafka__light__pb2.HeartbeatResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/PollMessages RPC (PollMessagesRequest -> PollMessagesResponse).
    @staticmethod
    def PollMessages(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/PollMessages",
            kafka__light__pb2.PollMessagesRequest.SerializeToString,
            kafka__light__pb2.PollMessagesResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/CommitOffsets RPC (CommitOffsetsRequest -> CommitOffsetsResponse).
    @staticmethod
    def CommitOffsets(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/CommitOffsets",
            kafka__light__pb2.CommitOffsetsRequest.SerializeToString,
            kafka__light__pb2.CommitOffsetsResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )

    # One-shot /kafka_light.KafkaLight/GetCommittedOffset RPC (GetCommittedOffsetRequest -> GetCommittedOffsetResponse).
    @staticmethod
    def GetCommittedOffset(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        insecure=False,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        return grpc.experimental.unary_unary(
            request,
            target,
            "/kafka_light.KafkaLight/GetCommittedOffset",
            kafka__light__pb2.GetCommittedOffsetRequest.SerializeToString,
            kafka__light__pb2.GetCommittedOffsetResponse.FromString,
            options,
            channel_credentials,
            insecure,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
            _registered_method=True,
        )
|
|
@@ -0,0 +1,404 @@
|
|
|
1
|
+
"""gRPC-backed kafka-light transport adapters."""
|
|
2
|
+
|
|
3
|
+
# pyright: reportAttributeAccessIssue=false
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
import time
|
|
11
|
+
import uuid
|
|
12
|
+
from collections.abc import Mapping, Sequence
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
import grpc
|
|
16
|
+
|
|
17
|
+
from ._generated import kafka_light_pb2, kafka_light_pb2_grpc
|
|
18
|
+
from .config import KafkaLightConfig
|
|
19
|
+
from .interfaces import EventConsumerRebalanceListener
|
|
20
|
+
from .models import PublishAck, ReceivedMessage, TopicPartition
|
|
21
|
+
|
|
22
|
+
# Maximum messages fetched per PollMessages RPC; _read_one() consumes one message.
_POLL_LIMIT = 1
# Sleep between poll rounds when no assigned topic yielded a message.
_POLL_INTERVAL_SECONDS = 0.1
# Interval between background Heartbeat RPCs issued by the consumer.
_HEARTBEAT_INTERVAL_SECONDS = 1.0
# session_timeout_ms advertised to the broker in JoinGroup requests.
_SESSION_TIMEOUT_MS = 10_000

# Module-level logger for the kafka-light transport adapters.
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _now_unix_ms() -> int:
    """Return the current wall-clock time as whole milliseconds since the Unix epoch."""
    nanoseconds = time.time_ns()
    return nanoseconds // 1_000_000
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _encode_headers(
    headers: Sequence[tuple[str, bytes]] | None,
) -> list[kafka_light_pb2.Header]:
    """Convert (key, value) pairs into protobuf ``Header`` messages; None/empty -> []."""
    if not headers:
        return []
    encoded = []
    for key, value in headers:
        encoded.append(kafka_light_pb2.Header(key=key, value=value))
    return encoded
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _decode_headers(
    headers: Sequence[kafka_light_pb2.Header] | None,
) -> list[tuple[str, bytes]]:
    """Convert protobuf ``Header`` messages back into (key, bytes value) pairs."""
    if not headers:
        return []
    pairs = []
    for header in headers:
        pairs.append((header.key, bytes(header.value)))
    return pairs
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _encode_payload(value: object) -> bytes:
    """Serialize *value* to UTF-8 encoded JSON bytes."""
    text = json.dumps(value)
    return text.encode("utf-8")
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _decode_payload(raw_payload: bytes) -> Any:
    """Parse UTF-8 JSON bytes back into Python objects."""
    text = raw_payload.decode("utf-8")
    return json.loads(text)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _set_partition_zero(request: object) -> None:
    """Pin ``request.partition`` to 0 when the message carries that field.

    kafka-light requests are single-partition; messages lacking a partition
    field are left untouched.
    """
    has_partition = hasattr(request, "partition")
    if has_partition:
        request.partition = 0
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _target(config: KafkaLightConfig) -> str:
    """Pick the gRPC endpoint for *config*, warning when extra brokers are ignored.

    Raises ValueError when the broker list is empty.
    """
    brokers = config.brokers
    if not brokers:
        raise ValueError("kafka-light requires at least one broker endpoint")
    first = brokers[0]
    if len(brokers) > 1:
        logger.warning(
            "kafka-light currently uses only the first broker endpoint: %s",
            first,
        )
    return first
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _is_rejoin_error(error: grpc.aio.AioRpcError) -> bool:
    """True when the server's status code means this member must re-join its group."""
    rejoin_codes = (
        grpc.StatusCode.FAILED_PRECONDITION,
        grpc.StatusCode.PERMISSION_DENIED,
    )
    return error.code() in rejoin_codes
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class KafkaLightProducerAdapter:
    """Async producer that publishes JSON events over the kafka-light gRPC service.

    A channel and/or stub may be injected (e.g. for tests); the adapter only
    closes a channel it created itself.
    """

    def __init__(
        self,
        config: KafkaLightConfig,
        *,
        channel: grpc.aio.Channel | None = None,
        stub: kafka_light_pb2_grpc.KafkaLightStub | None = None,
    ) -> None:
        self._config = config
        self._channel = channel
        self._stub = stub
        # Close the channel on stop() only when both channel and stub are ours.
        self._owns_channel = channel is None and stub is None

    async def start(self) -> None:
        """Create the channel/stub lazily; a no-op when a stub was injected."""
        if self._stub is not None:
            return
        if self._channel is None:
            self._channel = grpc.aio.insecure_channel(_target(self._config))
        self._stub = kafka_light_pb2_grpc.KafkaLightStub(self._channel)

    async def stop(self) -> None:
        """Close the owned channel (if any) and forget the stub."""
        channel = self._channel
        if channel is not None and self._owns_channel:
            await channel.close()
        self._channel = None
        self._stub = None

    async def send_and_wait(
        self,
        topic: str,
        *,
        value: object,
        key: str | None = None,
        headers: Sequence[tuple[str, bytes]] | None = None,
    ) -> PublishAck:
        """Publish *value* as JSON to *topic* and wait for the broker's ack."""
        stub = self._require_stub()
        message = kafka_light_pb2.NewMessage(
            topic=topic,
            key=key or "",
            payload_json=_encode_payload(value),
            headers=_encode_headers(headers),
            published_at_unix_ms=_now_unix_ms(),
        )
        ack = await stub.Publish(message)
        return PublishAck(topic=ack.topic, partition=ack.partition, offset=ack.offset)

    def _require_stub(self) -> kafka_light_pb2_grpc.KafkaLightStub:
        """Return the live stub, raising if start() has not been called."""
        stub = self._stub
        if stub is None:
            raise RuntimeError(f"kafka-light producer is not started ({_target(self._config)})")
        return stub
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
class KafkaLightConsumerAdapter:
    """Async group consumer over the kafka-light gRPC service.

    Joins a consumer group, polls assigned topics round-robin (one message per
    poll), heartbeats in the background, and commits offsets explicitly.
    kafka-light only supports partition 0.
    """

    def __init__(
        self,
        config: KafkaLightConfig,
        *,
        group_id: str,
        topics: Sequence[str] | None = None,
        auto_offset_reset: str = "earliest",
        channel: grpc.aio.Channel | None = None,
        stub: kafka_light_pb2_grpc.KafkaLightStub | None = None,
    ) -> None:
        self._config = config
        self._group_id = group_id
        # Unique member identity for this consumer instance within the group.
        self._member_id = f"{group_id}-{uuid.uuid4().hex[:12]}"
        self._topics = list(topics or [])
        self._auto_offset_reset = auto_offset_reset
        self._channel = channel
        self._stub = stub
        # Only close the channel in stop() when neither channel nor stub was injected.
        self._owns_channel = channel is None and stub is None
        self._listener: EventConsumerRebalanceListener | None = None
        self._assigned: set[TopicPartition] = set()
        # topic -> next offset to fetch from.
        self._fetch_offsets: dict[str, int] = {}
        # Round-robin cursor over assigned topics, used by __anext__.
        self._topic_index = 0
        self._generation = 0
        # Serializes join/heartbeat/commit so generation and assignment stay consistent.
        self._coordination_lock = asyncio.Lock()
        self._heartbeat_task: asyncio.Task[None] | None = None

    def subscribe(
        self,
        topics: Sequence[str],
        listener: EventConsumerRebalanceListener | None = None,
    ) -> None:
        """Set the topics and rebalance listener; applied on the next (re)join."""
        self._topics = list(topics)
        self._listener = listener

    async def start(self) -> None:
        """Open channel/stub if needed, join the group, and start the heartbeat task."""
        if self._stub is None:
            if self._channel is None:
                self._channel = grpc.aio.insecure_channel(_target(self._config))
            self._stub = kafka_light_pb2_grpc.KafkaLightStub(self._channel)

        await self._join_group()
        self._heartbeat_task = asyncio.create_task(self._heartbeat_loop())

    async def stop(self) -> None:
        """Cancel heartbeats, notify the listener, leave the group, release the channel."""
        if self._heartbeat_task is not None:
            self._heartbeat_task.cancel()
            try:
                await self._heartbeat_task
            except asyncio.CancelledError:
                pass
            self._heartbeat_task = None

        old_assigned = self._assigned
        self._assigned = set()
        self._fetch_offsets = {}
        if self._listener is not None and old_assigned:
            self._listener.on_partitions_revoked(old_assigned)

        if self._stub is not None:
            try:
                await self._stub.LeaveGroup(
                    kafka_light_pb2.LeaveGroupRequest(
                        group_id=self._group_id,
                        member_id=self._member_id,
                    )
                )
            except grpc.aio.AioRpcError:
                # Best-effort: the broker will expire the member on session timeout anyway.
                logger.exception("kafka-light leave-group failed")

        self._generation = 0
        if self._channel is not None and self._owns_channel:
            await self._channel.close()
        self._channel = None
        self._stub = None

    async def commit(self, offsets: Mapping[TopicPartition, int]) -> None:
        """Commit the next offset to consume per topic-partition.

        Raises ValueError for partitions other than 0 and RuntimeError when the
        broker rejects a commit. On stale-generation gRPC errors, rejoins the
        group once and then re-raises so the caller can retry.
        """
        stub = self._require_stub()
        async with self._coordination_lock:
            for tp, offset in offsets.items():
                if tp.partition != 0:
                    raise ValueError("kafka-light only supports partition 0")
                try:
                    response = await stub.CommitOffsets(
                        _commit_offsets_request(
                            group_id=self._group_id,
                            member_id=self._member_id,
                            generation=self._generation,
                            topic=tp.topic,
                            next_offset=offset,
                            updated_at_unix_ms=_now_unix_ms(),
                        )
                    )
                except grpc.aio.AioRpcError as error:
                    if _is_rejoin_error(error):
                        # Refresh membership before surfacing the failure.
                        await self._join_group_locked()
                    raise
                if not response.committed:
                    raise RuntimeError(f"kafka-light commit rejected for {tp.topic}@{offset}")

    def seek(self, tp: TopicPartition, offset: int) -> None:
        """Reposition the fetch offset for an assigned topic-partition."""
        if tp.partition != 0:
            raise ValueError("kafka-light only supports partition 0")
        if tp not in self._assigned:
            raise ValueError(f"kafka-light topic-partition is not assigned: {tp}")
        self._fetch_offsets[tp.topic] = offset

    def __aiter__(self) -> KafkaLightConsumerAdapter:
        return self

    async def __anext__(self) -> ReceivedMessage:
        """Yield the next message, round-robining assigned topics; sleeps while idle.

        Never raises StopAsyncIteration — the stream waits indefinitely for data.
        """
        while True:
            topics = [tp.topic for tp in sorted(self._assigned, key=lambda tp: tp.topic)]
            if not topics:
                # Nothing assigned (yet); wait for a rebalance to hand us partitions.
                await asyncio.sleep(_POLL_INTERVAL_SECONDS)
                continue

            for _ in range(len(topics)):
                topic = topics[self._topic_index % len(topics)]
                self._topic_index = (self._topic_index + 1) % len(topics)
                message = await self._read_one(topic)
                if message is not None:
                    # Advance past the delivered message for the next poll.
                    self._fetch_offsets[topic] = message.offset + 1
                    return message
            await asyncio.sleep(_POLL_INTERVAL_SECONDS)

    async def _heartbeat_loop(self) -> None:
        """Background task: heartbeat every interval and rejoin when the broker says so."""
        stub = self._require_stub()
        try:
            while True:
                await asyncio.sleep(_HEARTBEAT_INTERVAL_SECONDS)
                async with self._coordination_lock:
                    response = await stub.Heartbeat(
                        kafka_light_pb2.HeartbeatRequest(
                            group_id=self._group_id,
                            member_id=self._member_id,
                            generation=self._generation,
                            heartbeat_at_unix_ms=_now_unix_ms(),
                        )
                    )
                    if response.needs_rejoin:
                        await self._join_group_locked()
        except asyncio.CancelledError:
            raise
        except grpc.aio.AioRpcError:
            # NOTE(review): a single RPC failure terminates the heartbeat loop;
            # the broker will then expire this member after the session timeout.
            logger.exception("kafka-light heartbeat failed")

    async def _join_group(self) -> None:
        """Join the group while holding the coordination lock."""
        async with self._coordination_lock:
            await self._join_group_locked()

    async def _join_group_locked(self) -> None:
        """(Re)join the group; caller must already hold ``_coordination_lock``.

        Updates generation, the assignment set, and fetch offsets, and fires the
        rebalance listener for revoked/assigned partitions.
        """
        if self._auto_offset_reset != "earliest":
            raise ValueError(
                "kafka-light currently supports committed offsets or auto_offset_reset='earliest'"
            )

        response = await self._require_stub().JoinGroup(
            _join_group_request(
                group_id=self._group_id,
                member_id=self._member_id,
                topics=self._topics,
                session_timeout_ms=_SESSION_TIMEOUT_MS,
                joined_at_unix_ms=_now_unix_ms(),
            )
        )
        self._generation = response.generation
        new_assigned = {TopicPartition(item.topic, item.partition) for item in response.assignments}
        revoked = self._assigned - new_assigned
        assigned = new_assigned - self._assigned
        # Revoke first, then swap in the new assignment, then announce additions.
        if self._listener is not None and revoked:
            self._listener.on_partitions_revoked(revoked)
        self._assigned = new_assigned
        # Fetch positions restart from the broker-provided next offsets.
        self._fetch_offsets = {item.topic: int(item.next_offset) for item in response.assignments}
        if self._listener is not None and assigned:
            self._listener.on_partitions_assigned(assigned)

    async def _read_one(self, topic: str) -> ReceivedMessage | None:
        """Poll a single message from *topic*; None when empty or after a rejoin."""
        stub = self._require_stub()
        try:
            response = await stub.PollMessages(
                _poll_messages_request(
                    group_id=self._group_id,
                    member_id=self._member_id,
                    generation=self._generation,
                    topic=topic,
                    start_offset=self._fetch_offsets.get(topic, 0),
                    limit=_POLL_LIMIT,
                )
            )
        except grpc.aio.AioRpcError as error:
            if _is_rejoin_error(error):
                await self._join_group()
                return None
            raise
        if not response.messages:
            return None
        raw = response.messages[0]
        return ReceivedMessage(
            topic=raw.topic,
            partition=raw.partition,
            offset=raw.offset,
            value=_decode_payload(bytes(raw.payload_json)),
            headers=_decode_headers(raw.headers),
            key=raw.key or None,
        )

    def _require_stub(self) -> kafka_light_pb2_grpc.KafkaLightStub:
        """Return the live stub, raising if start() has not been called."""
        if self._stub is None:
            raise RuntimeError(f"kafka-light consumer is not started ({_target(self._config)})")
        return self._stub
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
def _join_group_request(
    *,
    group_id: str,
    member_id: str,
    topics: Sequence[str],
    session_timeout_ms: int,
    joined_at_unix_ms: int,
) -> kafka_light_pb2.JoinGroupRequest:
    """Build a JoinGroupRequest, pinning any partition field to 0."""
    fields = {
        "group_id": group_id,
        "member_id": member_id,
        "topics": list(topics),
        "session_timeout_ms": session_timeout_ms,
        "joined_at_unix_ms": joined_at_unix_ms,
    }
    request = kafka_light_pb2.JoinGroupRequest(**fields)
    _set_partition_zero(request)
    return request
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
def _poll_messages_request(
    *,
    group_id: str,
    member_id: str,
    generation: int,
    topic: str,
    start_offset: int,
    limit: int,
) -> kafka_light_pb2.PollMessagesRequest:
    """Build a PollMessagesRequest, pinning any partition field to 0.

    wait_ms is fixed at 0: the consumer sleeps between polls instead of
    relying on server-side long-polling.
    """
    fields = {
        "group_id": group_id,
        "member_id": member_id,
        "generation": generation,
        "topic": topic,
        "start_offset": start_offset,
        "limit": limit,
        "wait_ms": 0,
    }
    request = kafka_light_pb2.PollMessagesRequest(**fields)
    _set_partition_zero(request)
    return request
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
def _commit_offsets_request(
    *,
    group_id: str,
    member_id: str,
    generation: int,
    topic: str,
    next_offset: int,
    updated_at_unix_ms: int,
) -> kafka_light_pb2.CommitOffsetsRequest:
    """Build a CommitOffsetsRequest, pinning any partition field to 0."""
    fields = {
        "group_id": group_id,
        "member_id": member_id,
        "generation": generation,
        "topic": topic,
        "next_offset": next_offset,
        "updated_at_unix_ms": updated_at_unix_ms,
    }
    request = kafka_light_pb2.CommitOffsetsRequest(**fields)
    _set_partition_zero(request)
    return request
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
"""Shared configuration models for kafka-light clients."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass(frozen=True)
class KafkaLightConfig:
    """Connection settings shared by kafka-light producer and consumer clients."""

    # "host:port" gRPC endpoints; clients currently use only the first entry.
    brokers: list[str] = field(default_factory=lambda: ["127.0.0.1:7171"])
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"""Protocols for kafka-light producer and consumer implementations."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Mapping, Sequence
|
|
6
|
+
from typing import Protocol
|
|
7
|
+
|
|
8
|
+
from .models import PublishAck, ReceivedMessage, TopicPartition
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class EventConsumerRebalanceListener(Protocol):
    """Callbacks fired around consumer-group partition rebalances."""

    # Called with partitions being taken away from this consumer.
    def on_partitions_revoked(self, revoked: set[TopicPartition]) -> None: ...

    # Called with partitions newly handed to this consumer.
    def on_partitions_assigned(self, assigned: set[TopicPartition]) -> None: ...
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class EventProducer(Protocol):
    """Structural interface for an async producer with start/stop lifecycle."""

    async def start(self) -> None: ...

    async def stop(self) -> None: ...

    # Publish *value* to *topic* and wait for the broker acknowledgement.
    async def send_and_wait(
        self,
        topic: str,
        *,
        value: object,
        key: str | None = None,
        headers: Sequence[tuple[str, bytes]] | None = None,
    ) -> PublishAck: ...
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class EventConsumer(Protocol):
    """Structural interface for an async-iterable group consumer."""

    # Register topics (and optional rebalance listener) to consume.
    def subscribe(
        self,
        topics: Sequence[str],
        listener: EventConsumerRebalanceListener | None = None,
    ) -> None: ...

    async def start(self) -> None: ...

    async def stop(self) -> None: ...

    # Commit the next offset to consume per topic-partition.
    async def commit(self, offsets: Mapping[TopicPartition, int]) -> None: ...

    # Reposition the fetch offset for a topic-partition.
    def seek(self, tp: TopicPartition, offset: int) -> None: ...

    def __aiter__(self) -> "EventConsumer": ...

    async def __anext__(self) -> ReceivedMessage: ...
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Shared kafka-light producer and consumer models."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass(frozen=True)
class TopicPartition:
    """Immutable, hashable identifier for a topic/partition pair."""

    topic: str
    # kafka-light clients only operate on partition 0.
    partition: int = 0
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass(frozen=True)
class PublishAck:
    """Broker acknowledgement for a published message."""

    topic: str
    partition: int
    # Offset assigned to the message by the broker.
    offset: int
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class ReceivedMessage:
    """A single consumed message with its decoded payload."""

    topic: str
    partition: int
    offset: int
    # Decoded message payload (JSON-deserialized by the consumer adapter).
    value: Any
    # (key, raw bytes) header pairs; empty when the message carried none.
    headers: list[tuple[str, bytes]] = field(default_factory=list)
    # Message key, or None when the producer sent an empty key.
    key: str | None = None
|