taskiq-redis 1.0.9__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,8 @@
1
- Metadata-Version: 2.3
1
+ Metadata-Version: 2.4
2
2
  Name: taskiq-redis
3
- Version: 1.0.9
3
+ Version: 1.1.1
4
4
  Summary: Redis integration for taskiq
5
+ License-File: LICENSE
5
6
  Keywords: taskiq,tasks,distributed,async,redis,result_backend
6
7
  Author: taskiq-team
7
8
  Author-email: taskiq@norely.com
@@ -13,9 +14,10 @@ Classifier: Programming Language :: Python :: 3.10
13
14
  Classifier: Programming Language :: Python :: 3.11
14
15
  Classifier: Programming Language :: Python :: 3.12
15
16
  Classifier: Programming Language :: Python :: 3.13
17
+ Classifier: Programming Language :: Python :: 3.14
16
18
  Classifier: Programming Language :: Python :: 3 :: Only
17
19
  Classifier: Programming Language :: Python :: 3.8
18
- Requires-Dist: redis (>=5,<6)
20
+ Requires-Dist: redis (>=6,<7)
19
21
  Requires-Dist: taskiq (>=0.11.12,<1)
20
22
  Project-URL: Homepage, https://github.com/taskiq-python/taskiq-redis
21
23
  Project-URL: Repository, https://github.com/taskiq-python/taskiq-redis
@@ -213,3 +215,17 @@ scheduler = TaskiqScheduler(broker, [array_source])
213
215
 
214
216
  During startup the scheduler will try to migrate schedules from an old source to a new one. Please be sure to specify different prefixes, just to avoid any kind of collision between these two.
215
217
 
218
+
219
+ ## Dynamic queue names
220
+
221
+
222
+ Brokers support dynamic queue names, allowing you to specify different queues when kicking tasks. This is useful for routing tasks to specific queues based on runtime conditions, such as priority levels, tenant isolation, or environment-specific processing.
223
+
224
+ Simply pass the desired queue name as the message's label when kicking a task to override the broker's default queue configuration.
225
+
226
+ ```python
227
+ @broker.task(queue_name="low_priority")
228
+ async def low_priority_task() -> None:
229
+ print("I don't mind waiting a little longer")
230
+ ```
231
+
@@ -189,3 +189,17 @@ scheduler = TaskiqScheduler(broker, [array_source])
189
189
  ```
190
190
 
191
191
  During startup the scheduler will try to migrate schedules from an old source to a new one. Please be sure to specify different prefixes, just to avoid any kind of collision between these two.
192
+
193
+
194
+ ## Dynamic queue names
195
+
196
+
197
+ Brokers support dynamic queue names, allowing you to specify different queues when kicking tasks. This is useful for routing tasks to specific queues based on runtime conditions, such as priority levels, tenant isolation, or environment-specific processing.
198
+
199
+ Simply pass the desired queue name as the message's label when kicking a task to override the broker's default queue configuration.
200
+
201
+ ```python
202
+ @broker.task(queue_name="low_priority")
203
+ async def low_priority_task() -> None:
204
+ print("I don't mind waiting a little longer")
205
+ ```
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "taskiq-redis"
3
- version = "1.0.9"
3
+ version = "1.1.1"
4
4
  description = "Redis integration for taskiq"
5
5
  authors = ["taskiq-team <taskiq@norely.com>"]
6
6
  readme = "README.md"
@@ -27,7 +27,7 @@ keywords = [
27
27
  [tool.poetry.dependencies]
28
28
  python = "^3.9"
29
29
  taskiq = ">=0.11.12,<1"
30
- redis = "^5"
30
+ redis = "^6"
31
31
 
32
32
  [tool.poetry.group.dev.dependencies]
33
33
  pytest = "^8"
@@ -130,7 +130,7 @@ class ListRedisScheduleSource(ScheduleSource):
130
130
  if key_time and key_time <= minute_before:
131
131
  time_keys.append(key.decode())
132
132
  for key in time_keys:
133
- schedules.extend(await redis.lrange(key, 0, -1)) # type: ignore
133
+ schedules.extend(await redis.lrange(key, 0, -1))
134
134
 
135
135
  return schedules
136
136
 
@@ -146,10 +146,10 @@ class ListRedisScheduleSource(ScheduleSource):
146
146
  )
147
147
  # We need to remove the schedule from the cron or time list.
148
148
  if schedule.cron is not None:
149
- await redis.lrem(self._get_cron_key(), 0, schedule_id) # type: ignore
149
+ await redis.lrem(self._get_cron_key(), 0, schedule_id)
150
150
  elif schedule.time is not None:
151
151
  time_key = self._get_time_key(schedule.time)
152
- await redis.lrem(time_key, 0, schedule_id) # type: ignore
152
+ await redis.lrem(time_key, 0, schedule_id)
153
153
 
154
154
  async def add_schedule(self, schedule: "ScheduledTask") -> None:
155
155
  """Add a schedule to the source."""
@@ -163,9 +163,9 @@ class ListRedisScheduleSource(ScheduleSource):
163
163
  # This is an optimization, so we can get all the schedules
164
164
  # for the current time much faster.
165
165
  if schedule.cron is not None:
166
- await redis.rpush(self._get_cron_key(), schedule.schedule_id) # type: ignore
166
+ await redis.rpush(self._get_cron_key(), schedule.schedule_id)
167
167
  elif schedule.time is not None:
168
- await redis.rpush( # type: ignore
168
+ await redis.rpush(
169
169
  self._get_time_key(schedule.time),
170
170
  schedule.schedule_id,
171
171
  )
@@ -195,11 +195,11 @@ class ListRedisScheduleSource(ScheduleSource):
195
195
  self._is_first_run = False
196
196
  async with Redis(connection_pool=self._connection_pool) as redis:
197
197
  buffer = []
198
- crons = await redis.lrange(self._get_cron_key(), 0, -1) # type: ignore
198
+ crons = await redis.lrange(self._get_cron_key(), 0, -1)
199
199
  logger.debug("Got %d cron schedules", len(crons))
200
200
  if crons:
201
201
  buffer.extend(crons)
202
- timed.extend(await redis.lrange(self._get_time_key(current_time), 0, -1)) # type: ignore
202
+ timed.extend(await redis.lrange(self._get_time_key(current_time), 0, -1))
203
203
  logger.debug("Got %d timed schedules", len(timed))
204
204
  if timed:
205
205
  buffer.extend(timed)
@@ -131,7 +131,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
131
131
  redis_set_params["px"] = self.result_px_time
132
132
 
133
133
  async with Redis(connection_pool=self.redis_pool) as redis:
134
- await redis.set(**redis_set_params) # type: ignore
134
+ await redis.set(**redis_set_params)
135
135
 
136
136
  async def is_result_ready(self, task_id: str) -> bool:
137
137
  """
@@ -205,7 +205,7 @@ class RedisAsyncResultBackend(AsyncResultBackend[_ReturnType]):
205
205
  redis_set_params["px"] = self.result_px_time
206
206
 
207
207
  async with Redis(connection_pool=self.redis_pool) as redis:
208
- await redis.set(**redis_set_params) # type: ignore
208
+ await redis.set(**redis_set_params)
209
209
 
210
210
  async def get_progress(
211
211
  self,
@@ -122,7 +122,7 @@ class ListQueueBroker(BaseRedisBroker):
122
122
  """
123
123
  queue_name = message.labels.get("queue_name") or self.queue_name
124
124
  async with Redis(connection_pool=self.connection_pool) as redis_conn:
125
- await redis_conn.lpush(queue_name, message.message) # type: ignore
125
+ await redis_conn.lpush(queue_name, message.message)
126
126
 
127
127
  async def listen(self) -> AsyncGenerator[bytes, None]:
128
128
  """
@@ -137,7 +137,7 @@ class ListQueueBroker(BaseRedisBroker):
137
137
  while True:
138
138
  try:
139
139
  async with Redis(connection_pool=self.connection_pool) as redis_conn:
140
- yield (await redis_conn.brpop(self.queue_name))[ # type: ignore
140
+ yield (await redis_conn.brpop(self.queue_name))[
141
141
  redis_brpop_data_position
142
142
  ]
143
143
  except ConnectionError as exc:
@@ -251,9 +251,10 @@ class RedisStreamBroker(BaseRedisBroker):
251
251
 
252
252
  :param message: message to append.
253
253
  """
254
+ queue_name = message.labels.get("queue_name") or self.queue_name
254
255
  async with Redis(connection_pool=self.connection_pool) as redis_conn:
255
256
  await redis_conn.xadd(
256
- self.queue_name,
257
+ queue_name,
257
258
  {b"data": message.message},
258
259
  maxlen=self.maxlen,
259
260
  approximate=self.approximate,
@@ -280,7 +281,7 @@ class RedisStreamBroker(BaseRedisBroker):
280
281
  self.consumer_name,
281
282
  {
282
283
  self.queue_name: ">",
283
- **self.additional_streams, # type: ignore
284
+ **self.additional_streams,
284
285
  },
285
286
  block=self.block,
286
287
  noack=False,
@@ -310,7 +311,7 @@ class RedisStreamBroker(BaseRedisBroker):
310
311
  )
311
312
  logger.debug(
312
313
  "Found %d pending messages in stream %s",
313
- len(pending),
314
+ len(pending[1]),
314
315
  stream,
315
316
  )
316
317
  for msg_id, msg in pending[1]:
@@ -55,7 +55,8 @@ class ListQueueClusterBroker(BaseRedisClusterBroker):
55
55
 
56
56
  :param message: message to append.
57
57
  """
58
- await self.redis.lpush(self.queue_name, message.message) # type: ignore
58
+ queue_name = message.labels.get("queue_name") or self.queue_name
59
+ await self.redis.lpush(queue_name, message.message) # type: ignore
59
60
 
60
61
  async def listen(self) -> AsyncGenerator[bytes, None]:
61
62
  """
@@ -162,8 +163,9 @@ class RedisStreamClusterBroker(BaseRedisClusterBroker):
162
163
 
163
164
  :param message: message to append.
164
165
  """
166
+ queue_name = message.labels.get("queue_name") or self.queue_name
165
167
  await self.redis.xadd(
166
- self.queue_name,
168
+ queue_name,
167
169
  {b"data": message.message},
168
170
  maxlen=self.maxlen,
169
171
  approximate=self.approximate,
@@ -230,9 +230,10 @@ class RedisStreamSentinelBroker(BaseSentinelBroker):
230
230
 
231
231
  :param message: message to append.
232
232
  """
233
+ queue_name = message.labels.get("queue_name") or self.queue_name
233
234
  async with self._acquire_master_conn() as redis_conn:
234
235
  await redis_conn.xadd(
235
- self.queue_name,
236
+ queue_name,
236
237
  {b"data": message.message},
237
238
  maxlen=self.maxlen,
238
239
  approximate=self.approximate,
File without changes