taskiq-redis 1.0.8__tar.gz → 1.1.0__tar.gz

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: taskiq-redis
-Version: 1.0.8
+Version: 1.1.0
 Summary: Redis integration for taskiq
 Keywords: taskiq,tasks,distributed,async,redis,result_backend
 Author: taskiq-team
@@ -15,7 +15,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Programming Language :: Python :: 3.8
-Requires-Dist: redis (>=5,<6)
+Requires-Dist: redis (>=6,<7)
 Requires-Dist: taskiq (>=0.11.12,<1)
 Project-URL: Homepage, https://github.com/taskiq-python/taskiq-redis
 Project-URL: Repository, https://github.com/taskiq-python/taskiq-redis
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "taskiq-redis"
-version = "1.0.8"
+version = "1.1.0"
 description = "Redis integration for taskiq"
 authors = ["taskiq-team <taskiq@norely.com>"]
 readme = "README.md"
@@ -27,7 +27,7 @@ keywords = [
 [tool.poetry.dependencies]
 python = "^3.9"
 taskiq = ">=0.11.12,<1"
-redis = "^5"
+redis = "^6"
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^8"
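The only dependency change in this release is the redis-py bump from ^5 to ^6, which the built metadata above renders as (>=6,<7). A quick runtime check of the installed client, purely illustrative (taskiq-redis itself relies on packaging metadata, not a check like this):

import redis

# redis-py exposes its version as a plain string, e.g. "6.1.0".
major = int(redis.__version__.split(".")[0])
assert major == 6, f"taskiq-redis 1.1.0 expects redis-py 6.x, found {redis.__version__}"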
@@ -130,7 +130,7 @@ class ListRedisScheduleSource(ScheduleSource):
                 if key_time and key_time <= minute_before:
                     time_keys.append(key.decode())
             for key in time_keys:
-                schedules.extend(await redis.lrange(key, 0, -1))  # type: ignore
+                schedules.extend(await redis.lrange(key, 0, -1))
 
         return schedules
 
@@ -146,10 +146,10 @@ class ListRedisScheduleSource(ScheduleSource):
             )
             # We need to remove the schedule from the cron or time list.
             if schedule.cron is not None:
-                await redis.lrem(self._get_cron_key(), 0, schedule_id)  # type: ignore
+                await redis.lrem(self._get_cron_key(), 0, schedule_id)
             elif schedule.time is not None:
                 time_key = self._get_time_key(schedule.time)
-                await redis.lrem(time_key, 0, schedule_id)  # type: ignore
+                await redis.lrem(time_key, 0, schedule_id)
 
     async def add_schedule(self, schedule: "ScheduledTask") -> None:
         """Add a schedule to the source."""
@@ -163,9 +163,9 @@ class ListRedisScheduleSource(ScheduleSource):
             # This is an optimization, so we can get all the schedules
             # for the current time much faster.
             if schedule.cron is not None:
-                await redis.rpush(self._get_cron_key(), schedule.schedule_id)  # type: ignore
+                await redis.rpush(self._get_cron_key(), schedule.schedule_id)
             elif schedule.time is not None:
-                await redis.rpush(  # type: ignore
+                await redis.rpush(
                     self._get_time_key(schedule.time),
                     schedule.schedule_id,
                 )
@@ -195,11 +195,11 @@ class ListRedisScheduleSource(ScheduleSource):
             self._is_first_run = False
         async with Redis(connection_pool=self._connection_pool) as redis:
             buffer = []
-            crons = await redis.lrange(self._get_cron_key(), 0, -1)  # type: ignore
+            crons = await redis.lrange(self._get_cron_key(), 0, -1)
             logger.debug("Got %d cron schedules", len(crons))
             if crons:
                 buffer.extend(crons)
-            timed.extend(await redis.lrange(self._get_time_key(current_time), 0, -1))  # type: ignore
+            timed.extend(await redis.lrange(self._get_time_key(current_time), 0, -1))
             logger.debug("Got %d timed schedules", len(timed))
             if timed:
                 buffer.extend(timed)
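Every hunk in this schedule-source file removes a # type: ignore from a list command (lrange, lrem, rpush); the pragmas were presumably needed because redis-py 5 typed these commands too loosely for the awaits to type-check, and redis-py 6's asyncio annotations make them redundant. A self-contained sketch of the same three calls against redis-py 6; the URL and key name are illustrative, not the package's actual key scheme:

import asyncio

from redis.asyncio import BlockingConnectionPool, Redis


async def main() -> None:
    pool = BlockingConnectionPool.from_url("redis://localhost:6379")
    async with Redis(connection_pool=pool) as redis:
        await redis.rpush("schedules:cron", "schedule-1")    # append a schedule id
        ids = await redis.lrange("schedules:cron", 0, -1)    # read the whole list back
        await redis.lrem("schedules:cron", 0, "schedule-1")  # drop every occurrence
        print(ids)
    await pool.disconnect()


asyncio.run(main())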
@@ -131,7 +131,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
             redis_set_params["px"] = self.result_px_time
 
         async with Redis(connection_pool=self.redis_pool) as redis:
-            await redis.set(**redis_set_params)  # type: ignore
+            await redis.set(**redis_set_params)
 
     async def is_result_ready(self, task_id: str) -> bool:
         """
@@ -205,7 +205,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
             redis_set_params["px"] = self.result_px_time
 
         async with Redis(connection_pool=self.redis_pool) as redis:
-            await redis.set(**redis_set_params)  # type: ignore
+            await redis.set(**redis_set_params)
 
     async def get_progress(
         self,
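Both result-backend hunks drop the same pragma on redis.set(**redis_set_params). The surrounding context shows the call shape: the backend assembles a keyword dict with the key and the serialized value, adds px (a millisecond expiry) only when one is configured, and unpacks the dict into set. A hedged sketch of that pattern; the key format is illustrative, not the package's actual scheme:

from typing import Any, Dict, Optional

from redis.asyncio import Redis


async def store_result(redis: Redis, task_id: str, value: bytes, px: Optional[int]) -> None:
    # Mirrors the redis_set_params pattern above: name/value always,
    # "px" (expiry in milliseconds) only when a TTL is configured.
    params: Dict[str, Any] = {"name": f"result:{task_id}", "value": value}
    if px is not None:
        params["px"] = px
    await redis.set(**params)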
@@ -122,7 +122,7 @@ class ListQueueBroker(BaseRedisBroker):
         """
         queue_name = message.labels.get("queue_name") or self.queue_name
         async with Redis(connection_pool=self.connection_pool) as redis_conn:
-            await redis_conn.lpush(queue_name, message.message)  # type: ignore
+            await redis_conn.lpush(queue_name, message.message)
 
     async def listen(self) -> AsyncGenerator[bytes, None]:
         """
@@ -137,7 +137,7 @@ class ListQueueBroker(BaseRedisBroker):
         while True:
             try:
                 async with Redis(connection_pool=self.connection_pool) as redis_conn:
-                    yield (await redis_conn.brpop(self.queue_name))[  # type: ignore
+                    yield (await redis_conn.brpop(self.queue_name))[
                         redis_brpop_data_position
                     ]
             except ConnectionError as exc:
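The two hunks above show ListQueueBroker's whole queue protocol: kick LPUSHes raw message bytes onto a list, and listen blocks on BRPOP, yielding element [1] of the returned (key, value) pair, which is evidently what redis_brpop_data_position indexes. A standalone sketch of that round trip; the queue name is illustrative:

import asyncio

from redis.asyncio import Redis

REDIS_BRPOP_DATA_POSITION = 1  # BRPOP returns a (queue_name, payload) pair


async def main() -> None:
    async with Redis.from_url("redis://localhost:6379") as redis:
        await redis.lpush("taskiq", b"message-bytes")      # producer side (kick)
        popped = await redis.brpop(["taskiq"], timeout=1)  # consumer side (listen)
        if popped is not None:
            print(popped[REDIS_BRPOP_DATA_POSITION])       # b"message-bytes"


asyncio.run(main())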
@@ -166,6 +166,7 @@ class RedisStreamBroker(BaseRedisBroker):
         mkstream: bool = True,
         xread_block: int = 2000,
         maxlen: Optional[int] = None,
+        approximate: bool = True,
         idle_timeout: int = 600000,  # 10 minutes
         unacknowledged_batch_size: int = 100,
         xread_count: Optional[int] = 100,
@@ -190,6 +191,8 @@ class RedisStreamBroker(BaseRedisBroker):
             Better to set it to a bigger value, to avoid unnecessary calls.
         :param maxlen: sets the maximum length of the stream;
             trims (the old values of) the stream each time a new element is added
+        :param approximate: decides whether to trim the stream immediately (False) or
+            later on (True)
         :param xread_count: number of messages to fetch from the stream at once.
         :param additional_streams: additional streams to read from.
             Each key is a stream name, value is a consumer id.
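This approximate flag is the substantive feature of 1.1.0. With maxlen set, redis-py's xadd issues XADD <stream> MAXLEN <n> when approximate=False (exact trim on every append) and XADD <stream> MAXLEN ~ <n> when approximate=True (Redis trims lazily, only when it can discard a whole internal node, which is much cheaper but may briefly overshoot the cap). A standalone sketch; stream name and cap are illustrative:

import asyncio

from redis.asyncio import Redis


async def main() -> None:
    async with Redis.from_url("redis://localhost:6379") as redis:
        # approximate=True  -> XADD taskiq MAXLEN ~ 10000 ...  (lazy, cheap)
        # approximate=False -> XADD taskiq MAXLEN 10000 ...    (exact, trims every add)
        await redis.xadd(
            "taskiq",
            {b"data": b"message-bytes"},
            maxlen=10_000,
            approximate=True,
        )


asyncio.run(main())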
@@ -210,6 +213,7 @@ class RedisStreamBroker(BaseRedisBroker):
         self.mkstream = mkstream
         self.block = xread_block
         self.maxlen = maxlen
+        self.approximate = approximate
         self.additional_streams = additional_streams or {}
         self.idle_timeout = idle_timeout
         self.unacknowledged_batch_size = unacknowledged_batch_size
@@ -252,6 +256,7 @@ class RedisStreamBroker(BaseRedisBroker):
             self.queue_name,
             {b"data": message.message},
             maxlen=self.maxlen,
+            approximate=self.approximate,
         )
 
     def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
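With the flag stored on the instance and forwarded to xadd in kick (hunks above), enabling it from user code is one more constructor argument. A usage sketch, assuming the broker takes a Redis URL as its first argument as in the package README:

from taskiq_redis import RedisStreamBroker

# Illustrative: cap the stream at roughly 10k entries, trimmed lazily by Redis.
broker = RedisStreamBroker(
    url="redis://localhost:6379",
    maxlen=10_000,
    approximate=True,  # new in 1.1.0; False forces exact MAXLEN trimming
)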
@@ -275,7 +280,7 @@ class RedisStreamBroker(BaseRedisBroker):
             self.consumer_name,
             {
                 self.queue_name: ">",
-                **self.additional_streams,  # type: ignore
+                **self.additional_streams,
             },
             block=self.block,
             noack=False,
@@ -92,6 +92,8 @@ class RedisStreamClusterBroker(BaseRedisClusterBroker):
         consumer_id: str = "$",
         mkstream: bool = True,
         xread_block: int = 10000,
+        maxlen: Optional[int] = None,
+        approximate: bool = True,
         additional_streams: Optional[Dict[str, str]] = None,
         **connection_kwargs: Any,
     ) -> None:
@@ -111,6 +113,10 @@ class RedisStreamClusterBroker(BaseRedisClusterBroker):
         :param mkstream: create stream if it does not exist.
         :param xread_block: block time in ms for xreadgroup.
             Better to set it to a bigger value, to avoid unnecessary calls.
+        :param maxlen: sets the maximum length of the stream;
+            trims (the old values of) the stream each time a new element is added
+        :param approximate: decides whether to trim the stream immediately (False) or
+            later on (True)
         :param additional_streams: additional streams to read from.
             Each key is a stream name, value is a consumer id.
         """
@@ -125,6 +131,8 @@ class RedisStreamClusterBroker(BaseRedisClusterBroker):
         self.consumer_id = consumer_id
         self.mkstream = mkstream
         self.block = xread_block
+        self.maxlen = maxlen
+        self.approximate = approximate
         self.additional_streams = additional_streams or {}
 
     async def _declare_consumer_group(self) -> None:
@@ -154,7 +162,12 @@ class RedisStreamClusterBroker(BaseRedisClusterBroker):
 
         :param message: message to append.
         """
-        await self.redis.xadd(self.queue_name, {b"data": message.message})
+        await self.redis.xadd(
+            self.queue_name,
+            {b"data": message.message},
+            maxlen=self.maxlen,
+            approximate=self.approximate,
+        )
 
     def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
         async def _ack() -> None:
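The cluster broker gains maxlen support for the first time here: before this diff its kick called xadd with no trimming options at all. The equivalent raw call against redis-py's async cluster client, with an illustrative node address:

import asyncio

from redis.asyncio.cluster import RedisCluster


async def main() -> None:
    redis = RedisCluster.from_url("redis://localhost:7000")
    # Same call shape as the cluster broker's kick() above.
    await redis.xadd(
        "taskiq",
        {b"data": b"message-bytes"},
        maxlen=10_000,
        approximate=True,
    )
    await redis.aclose()


asyncio.run(main())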
@@ -157,6 +157,8 @@ class RedisStreamSentinelBroker(BaseSentinelBroker):
         consumer_id: str = "$",
         mkstream: bool = True,
         xread_block: int = 10000,
+        maxlen: Optional[int] = None,
+        approximate: bool = True,
         additional_streams: Optional[Dict[str, str]] = None,
         **connection_kwargs: Any,
     ) -> None:
@@ -176,6 +178,10 @@ class RedisStreamSentinelBroker(BaseSentinelBroker):
         :param mkstream: create stream if it does not exist.
         :param xread_block: block time in ms for xreadgroup.
             Better to set it to a bigger value, to avoid unnecessary calls.
+        :param maxlen: sets the maximum length of the stream;
+            trims (the old values of) the stream each time a new element is added
+        :param approximate: decides whether to trim the stream immediately (False) or
+            later on (True)
         :param additional_streams: additional streams to read from.
             Each key is a stream name, value is a consumer id.
         """
@@ -193,6 +199,8 @@ class RedisStreamSentinelBroker(BaseSentinelBroker):
         self.consumer_id = consumer_id
         self.mkstream = mkstream
         self.block = xread_block
+        self.maxlen = maxlen
+        self.approximate = approximate
         self.additional_streams = additional_streams or {}
 
     async def _declare_consumer_group(self) -> None:
@@ -223,7 +231,12 @@ class RedisStreamSentinelBroker(BaseSentinelBroker):
 
         :param message: message to append.
         """
         async with self._acquire_master_conn() as redis_conn:
-            await redis_conn.xadd(self.queue_name, {b"data": message.message})
+            await redis_conn.xadd(
+                self.queue_name,
+                {b"data": message.message},
+                maxlen=self.maxlen,
+                approximate=self.approximate,
+            )
 
     def _ack_generator(self, id: str) -> Callable[[], Awaitable[None]]:
         async def _ack() -> None:
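The sentinel broker applies the identical change, but routes the xadd through a connection acquired from the current master (_acquire_master_conn above). A sketch of that acquisition with redis-py's Sentinel helper; the sentinel address and service name are illustrative:

import asyncio

from redis.asyncio.sentinel import Sentinel


async def main() -> None:
    sentinel = Sentinel([("localhost", 26379)])
    master = sentinel.master_for("mymaster")  # async Redis bound to the current master
    await master.xadd(
        "taskiq",
        {b"data": b"message-bytes"},
        maxlen=10_000,
        approximate=True,
    )
    await master.aclose()


asyncio.run(main())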