matrice-inference 0.1.0__py3-none-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of matrice-inference might be problematic.

Files changed (80)
  1. matrice_inference/deploy/aggregator/aggregator.cpython-312-x86_64-linux-gnu.so +0 -0
  2. matrice_inference/deploy/aggregator/aggregator.pyi +55 -0
  3. matrice_inference/deploy/aggregator/analytics.cpython-312-x86_64-linux-gnu.so +0 -0
  4. matrice_inference/deploy/aggregator/analytics.pyi +63 -0
  5. matrice_inference/deploy/aggregator/ingestor.cpython-312-x86_64-linux-gnu.so +0 -0
  6. matrice_inference/deploy/aggregator/ingestor.pyi +79 -0
  7. matrice_inference/deploy/aggregator/pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  8. matrice_inference/deploy/aggregator/pipeline.pyi +139 -0
  9. matrice_inference/deploy/aggregator/publisher.cpython-312-x86_64-linux-gnu.so +0 -0
  10. matrice_inference/deploy/aggregator/publisher.pyi +59 -0
  11. matrice_inference/deploy/aggregator/synchronizer.cpython-312-x86_64-linux-gnu.so +0 -0
  12. matrice_inference/deploy/aggregator/synchronizer.pyi +58 -0
  13. matrice_inference/deploy/client/auto_streaming/auto_streaming.cpython-312-x86_64-linux-gnu.so +0 -0
  14. matrice_inference/deploy/client/auto_streaming/auto_streaming.pyi +145 -0
  15. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  16. matrice_inference/deploy/client/auto_streaming/auto_streaming_utils.pyi +126 -0
  17. matrice_inference/deploy/client/client.cpython-312-x86_64-linux-gnu.so +0 -0
  18. matrice_inference/deploy/client/client.pyi +337 -0
  19. matrice_inference/deploy/client/client_stream_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  20. matrice_inference/deploy/client/client_stream_utils.pyi +83 -0
  21. matrice_inference/deploy/client/client_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  22. matrice_inference/deploy/client/client_utils.pyi +77 -0
  23. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.cpython-312-x86_64-linux-gnu.so +0 -0
  24. matrice_inference/deploy/client/streaming_gateway/streaming_gateway.pyi +120 -0
  25. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  26. matrice_inference/deploy/client/streaming_gateway/streaming_gateway_utils.pyi +442 -0
  27. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.cpython-312-x86_64-linux-gnu.so +0 -0
  28. matrice_inference/deploy/client/streaming_gateway/streaming_results_handler.pyi +19 -0
  29. matrice_inference/deploy/optimize/cache_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  30. matrice_inference/deploy/optimize/cache_manager.pyi +15 -0
  31. matrice_inference/deploy/optimize/frame_comparators.cpython-312-x86_64-linux-gnu.so +0 -0
  32. matrice_inference/deploy/optimize/frame_comparators.pyi +203 -0
  33. matrice_inference/deploy/optimize/frame_difference.cpython-312-x86_64-linux-gnu.so +0 -0
  34. matrice_inference/deploy/optimize/frame_difference.pyi +165 -0
  35. matrice_inference/deploy/optimize/transmission.cpython-312-x86_64-linux-gnu.so +0 -0
  36. matrice_inference/deploy/optimize/transmission.pyi +97 -0
  37. matrice_inference/deploy/server/inference/batch_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  38. matrice_inference/deploy/server/inference/batch_manager.pyi +50 -0
  39. matrice_inference/deploy/server/inference/inference_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  40. matrice_inference/deploy/server/inference/inference_interface.pyi +114 -0
  41. matrice_inference/deploy/server/inference/model_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  42. matrice_inference/deploy/server/inference/model_manager.pyi +80 -0
  43. matrice_inference/deploy/server/inference/triton_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  44. matrice_inference/deploy/server/inference/triton_utils.pyi +115 -0
  45. matrice_inference/deploy/server/proxy/proxy_interface.cpython-312-x86_64-linux-gnu.so +0 -0
  46. matrice_inference/deploy/server/proxy/proxy_interface.pyi +90 -0
  47. matrice_inference/deploy/server/proxy/proxy_utils.cpython-312-x86_64-linux-gnu.so +0 -0
  48. matrice_inference/deploy/server/proxy/proxy_utils.pyi +113 -0
  49. matrice_inference/deploy/server/server.cpython-312-x86_64-linux-gnu.so +0 -0
  50. matrice_inference/deploy/server/server.pyi +155 -0
  51. matrice_inference/deploy/server/stream/inference_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  52. matrice_inference/deploy/server/stream/inference_worker.pyi +56 -0
  53. matrice_inference/deploy/server/stream/kafka_consumer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  54. matrice_inference/deploy/server/stream/kafka_consumer_worker.pyi +51 -0
  55. matrice_inference/deploy/server/stream/kafka_producer_worker.cpython-312-x86_64-linux-gnu.so +0 -0
  56. matrice_inference/deploy/server/stream/kafka_producer_worker.pyi +50 -0
  57. matrice_inference/deploy/server/stream/stream_debug_logger.cpython-312-x86_64-linux-gnu.so +0 -0
  58. matrice_inference/deploy/server/stream/stream_debug_logger.pyi +47 -0
  59. matrice_inference/deploy/server/stream/stream_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  60. matrice_inference/deploy/server/stream/stream_manager.pyi +69 -0
  61. matrice_inference/deploy/server/stream/video_buffer.cpython-312-x86_64-linux-gnu.so +0 -0
  62. matrice_inference/deploy/server/stream/video_buffer.pyi +120 -0
  63. matrice_inference/deploy/stream/kafka_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  64. matrice_inference/deploy/stream/kafka_stream.pyi +444 -0
  65. matrice_inference/deploy/stream/redis_stream.cpython-312-x86_64-linux-gnu.so +0 -0
  66. matrice_inference/deploy/stream/redis_stream.pyi +447 -0
  67. matrice_inference/deployment/camera_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  68. matrice_inference/deployment/camera_manager.pyi +669 -0
  69. matrice_inference/deployment/deployment.cpython-312-x86_64-linux-gnu.so +0 -0
  70. matrice_inference/deployment/deployment.pyi +736 -0
  71. matrice_inference/deployment/inference_pipeline.cpython-312-x86_64-linux-gnu.so +0 -0
  72. matrice_inference/deployment/inference_pipeline.pyi +527 -0
  73. matrice_inference/deployment/streaming_gateway_manager.cpython-312-x86_64-linux-gnu.so +0 -0
  74. matrice_inference/deployment/streaming_gateway_manager.pyi +275 -0
  75. matrice_inference/py.typed +0 -0
  76. matrice_inference-0.1.0.dist-info/METADATA +26 -0
  77. matrice_inference-0.1.0.dist-info/RECORD +80 -0
  78. matrice_inference-0.1.0.dist-info/WHEEL +5 -0
  79. matrice_inference-0.1.0.dist-info/licenses/LICENSE.txt +21 -0
  80. matrice_inference-0.1.0.dist-info/top_level.txt +1 -0
matrice_inference/deployment/inference_pipeline.pyi
@@ -0,0 +1,527 @@
+ """Auto-generated stub for module: inference_pipeline."""
+ from typing import Any, Dict, List, Optional, Set, Tuple
+
+ from camera_manager import CameraManager, Camera, CameraGroup, CameraGroupConfig, CameraConfig
+ from dataclasses import dataclass
+ from matrice_common.utils import handle_response
+ from streaming_gateway_manager import StreamingGatewayManager, StreamingGateway, StreamingGatewayConfig
+ import time
+
+ # Classes
+ class Aggregator:
+     """
+     Aggregator configuration for inference pipelines.
+
+     Attributes:
+         id: Unique identifier for the aggregator (MongoDB ObjectID)
+         action_id: ID of the associated action (MongoDB ObjectID)
+         status: Status of the aggregator
+         is_running: Whether the aggregator is currently running
+         created_at: Creation timestamp
+         updated_at: Last update timestamp
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create an Aggregator instance from API response data.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert the aggregator to a dictionary for API calls.
+     """
+
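The from_dict/to_dict pair above is this class's API (de)serialization boundary. A minimal round-trip sketch, assuming from_dict is a classmethod and the payload uses the snake_case keys listed under Attributes (the payload itself is hypothetical, not confirmed by the stub):

```python
# Hypothetical API payload; keys follow the Attributes list in the stub above.
payload = {
    "id": "664ab1df23abcf1c33123456",
    "action_id": "664ab1df23abcf1c33654321",
    "status": "running",
    "is_running": True,
}

aggregator = Aggregator.from_dict(payload)          # parse an API response
assert aggregator.to_dict()["status"] == "running"  # serialize back for API calls
```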
+ class ApplicationDeployment:
+     """
+     Application deployment configuration for inference pipelines.
+
+     Attributes:
+         application_id: ID of the application
+         application_version: Version of the application
+         deployment_id: ID of the deployment (optional)
+         status: Status of the application deployment
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create an ApplicationDeployment instance from API response data.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert the application deployment to a dictionary for API calls.
+     """
+
+ class InferencePipeline:
+     """
+     Inference pipeline instance for managing a specific ML model deployment orchestration.
+
+     This class provides methods to start, stop, monitor, and manage a single inference pipeline
+     that orchestrates the deployment and execution of machine learning models for
+     real-time data processing and inference.
+
+     Example:
+         Working with a specific inference pipeline:
+         ```python
+         from matrice import Session
+         from matrice_inference.deployment.inference_pipeline import InferencePipeline
+
+         session = Session(account_number="...", access_key="...", secret_key="...")
+
+         # Load existing pipeline
+         pipeline = InferencePipeline(session, pipeline_id="664ab1df23abcf1c33123456")
+
+         # Start the pipeline
+         result, error, message = pipeline.start()
+         if not error:
+             print("Pipeline started successfully")
+
+         # Check status
+         status, error, message = pipeline.get_status()
+         if not error:
+             print(f"Pipeline status: {status}")
+
+         # Stop the pipeline
+         result, error, message = pipeline.stop()
+         ```
+     """
+
+     def __init__(self: Any, session: Any, config: Any = None, pipeline_id: str = None) -> None: ...
+     """
+     Initialize an InferencePipeline instance.
+
+     Args:
+         session: Session object containing RPC client for API communication
+         config: InferencePipelineConfig object (for new pipelines)
+         pipeline_id: The ID of an existing pipeline to load
+     """
+
+     def add_camera_groups_to_streaming_gateway(self: Any, gateway_id: str, camera_group_ids: List[str]) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Add camera groups to a streaming gateway.
+
+     Args:
+         gateway_id: The ID of the streaming gateway
+         camera_group_ids: List of camera group IDs to add
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def add_cameras_to_group(self: Any, group_id: str, camera_configs: List[CameraConfig]) -> Tuple[Optional[List['Camera']], Optional[str], str]: ...
+     """
+     Add multiple cameras to a camera group in this inference pipeline.
+
+     Args:
+         group_id: The ID of the camera group
+         camera_configs: List of CameraConfig objects
+
+     Returns:
+         tuple: (camera_instances, error, message)
+     """
+
+     def aggregators(self: Any) -> List[Aggregator]: ...
+     """
+     Get the pipeline aggregators.
+     """
+
+     def aggregators(self: Any, value: List[Aggregator]) -> Any: ...
+     """
+     Set the pipeline aggregators.
+     """
+
+     def applications(self: Any) -> List[ApplicationDeployment]: ...
+     """
+     Get the pipeline applications.
+     """
+
+     def applications(self: Any, value: List[ApplicationDeployment]) -> Any: ...
+     """
+     Set the pipeline applications.
+     """
+
+     def config(self: Any) -> Optional[InferencePipelineConfig]: ...
+     """
+     Get the pipeline configuration.
+     """
+
+     def config(self: Any, value: Any) -> Any: ...
+     """
+     Set the pipeline configuration.
+     """
+
+     def create_camera(self: Any, camera_config: Any) -> Tuple[Optional['Camera'], Optional[str], str]: ...
+     """
+     Create a camera for this inference pipeline.
+
+     Args:
+         camera_config: CameraConfig object containing the camera configuration
+
+     Returns:
+         tuple: (camera_instance, error, message)
+     """
+
+     def create_camera_group(self: Any, group: Any) -> Tuple[Optional['CameraGroup'], Optional[str], str]: ...
+     """
+     Create a camera group for this inference pipeline.
+
+     Args:
+         group: CameraGroupConfig object containing the group configuration
+
+     Returns:
+         tuple: (camera_group_instance, error, message)
+     """
+
+     def create_streaming_gateway(self: Any, gateway_config: Any) -> Tuple[Optional['StreamingGateway'], Optional[str], str]: ...
+     """
+     Create a streaming gateway for this inference pipeline.
+
+     Args:
+         gateway_config: StreamingGatewayConfig object containing the gateway configuration
+
+     Returns:
+         tuple: (streaming_gateway, error, message)
+     """
+
+     def delete(self: Any, force: bool = False) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Delete this inference pipeline and clean up all associated resources.
+
+     Args:
+         force: Force delete even if active
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def delete_streaming_gateway(self: Any, gateway_id: str, force: bool = False) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Delete a streaming gateway by its ID.
+
+     Args:
+         gateway_id: The ID of the streaming gateway to delete
+         force: Force delete even if active
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def deployment_ids(self: Any) -> List[str]: ...
+     """
+     Get the deployment IDs.
+     """
+
+     def description(self: Any) -> str: ...
+     """
+     Get the pipeline description.
+     """
+
+     def description(self: Any, value: str) -> Any: ...
+     """
+     Set the pipeline description.
+     """
+
+     def get_camera_group_by_id(self: Any, group_id: str) -> Tuple[Optional['CameraGroup'], Optional[str], str]: ...
+     """
+     Get a camera group by its ID.
+
+     Args:
+         group_id: The ID of the camera group to retrieve
+
+     Returns:
+         tuple: (camera_group_instance, error, message)
+     """
+
+     def get_camera_groups(self: Any, page: int = 1, limit: int = 10, search: str = None) -> Tuple[Optional[List['CameraGroup']], Optional[str], str]: ...
+     """
+     Get camera groups for this inference pipeline.
+
+     Args:
+         page: Page number for pagination
+         limit: Items per page
+         search: Optional search term
+
+     Returns:
+         tuple: (camera_group_instances, error, message)
+     """
+
+     def get_cameras(self: Any, page: int = 1, limit: int = 10, search: str = None, group_id: str = None) -> Tuple[Optional[List['Camera']], Optional[str], str]: ...
+     """
+     Get cameras for this inference pipeline.
+
+     Args:
+         page: Page number for pagination
+         limit: Items per page
+         search: Optional search term
+         group_id: Optional filter by camera group ID
+
+     Returns:
+         tuple: (camera_instances, error, message)
+     """
+
+     def get_details(self: Any) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Retrieve detailed information about this inference pipeline.
+
+     Returns:
+         tuple: (pipeline_details, error, message)
+     """
+
+     def get_status(self: Any) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Retrieve the current status of this inference pipeline.
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def get_streaming_gateway_by_id(self: Any, gateway_id: str) -> Tuple[Optional['StreamingGateway'], Optional[str], str]: ...
+     """
+     Get a streaming gateway by its ID.
+
+     Args:
+         gateway_id: The ID of the streaming gateway to retrieve
+
+     Returns:
+         tuple: (streaming_gateway, error, message)
+     """
+
+     def get_streaming_gateways(self: Any, page: int = 1, limit: int = 10, search: str = None) -> Tuple[Optional[List['StreamingGateway']], Optional[str], str]: ...
+     """
+     Get streaming gateways for this inference pipeline.
+
+     Args:
+         page: Page number for pagination
+         limit: Items per page
+         search: Optional search term
+
+     Returns:
+         tuple: (streaming_gateways, error, message)
+     """
+
+     def id(self: Any) -> Optional[str]: ...
+     """
+     Get the pipeline ID.
+     """
+
+     def name(self: Any) -> str: ...
+     """
+     Get the pipeline name.
+     """
+
+     def name(self: Any, value: str) -> Any: ...
+     """
+     Set the pipeline name.
+     """
+
+     def refresh(self: Any) -> Any: ...
+     """
+     Refresh the pipeline configuration from the backend.
+     """
+
+     def remove_camera_groups_from_streaming_gateway(self: Any, gateway_id: str, camera_group_ids: List[str]) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Remove camera groups from a streaming gateway.
+
+     Args:
+         gateway_id: The ID of the streaming gateway
+         camera_group_ids: List of camera group IDs to remove
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def save(self: Any, project_id: str = None) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Save this inference pipeline to the backend.
+
+     Args:
+         project_id: The ID of the project (optional if set in config)
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def start(self: Any) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Start this inference pipeline for real-time processing.
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def status(self: Any) -> Optional[str]: ...
+     """
+     Get the pipeline status.
+     """
+
+     def stop(self: Any, force: bool = False) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Stop this inference pipeline and clean up resources.
+
+     Args:
+         force: Force stop even if active streams exist
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def update(self: Any) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Update this inference pipeline with the current configuration.
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def update_streaming_gateway(self: Any, gateway_id: str, gateway_config: Any) -> Tuple[Optional[Dict], Optional[str], str]: ...
+     """
+     Update an existing streaming gateway.
+
+     Args:
+         gateway_id: The ID of the streaming gateway to update
+         gateway_config: StreamingGatewayConfig object with updated configuration
+
+     Returns:
+         tuple: (result, error, message)
+     """
+
+     def wait_for_active(self: Any, timeout: int = 300, poll_interval: int = 10) -> Tuple[bool, Optional[str], str]: ...
+     """
+     Wait for this pipeline to reach 'active' status.
+
+     Args:
+         timeout: Maximum time to wait in seconds
+         poll_interval: Time between status checks in seconds
+
+     Returns:
+         tuple: (is_active, error, message)
+     """
+
+     def wait_for_ready(self: Any, timeout: int = 300, poll_interval: int = 10) -> Tuple[bool, Optional[str], str]: ...
+     """
+     Wait for this pipeline to reach 'ready' status.
+
+     Args:
+         timeout: Maximum time to wait in seconds
+         poll_interval: Time between status checks in seconds
+
+     Returns:
+         tuple: (is_ready, error, message)
+     """
+
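Every mutating method on InferencePipeline returns a (result, error, message) triple rather than raising, so call sites branch on error. A minimal provisioning sketch chaining the camera and gateway methods above; the CameraGroupConfig, CameraConfig, and StreamingGatewayConfig keyword arguments and the .id attribute access are assumptions, since those classes are defined in other stubs:

```python
# Assumed constructor kwargs; the real fields live in camera_manager.pyi
# and streaming_gateway_manager.pyi.
group, error, message = pipeline.create_camera_group(
    CameraGroupConfig(name="lobby-cams")
)
if error:
    raise RuntimeError(f"camera group creation failed: {message}")

cameras, error, message = pipeline.add_cameras_to_group(
    group.id,
    [CameraConfig(name="lobby-entrance", source="rtsp://...")],
)

gateway, error, message = pipeline.create_streaming_gateway(
    StreamingGatewayConfig(name="edge-gateway-1")
)
if not error:
    # Route the new group's streams through the gateway.
    pipeline.add_camera_groups_to_streaming_gateway(gateway.id, [group.id])
```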
+ class InferencePipelineConfig:
+     """
+     Inference pipeline configuration data class.
+
+     Attributes:
+         name: Name of the inference pipeline
+         description: Description of the inference pipeline
+         applications: List of application deployments
+         aggregators: List of aggregators (optional)
+         id: Unique identifier for the pipeline (MongoDB ObjectID)
+         project_id: Project ID this pipeline belongs to
+         user_id: User ID who created the pipeline
+         status: Status of the pipeline
+         created_at: Creation timestamp
+         updated_at: Last update timestamp
+     """
+
+     def from_dict(cls: Any, data: Dict) -> Any: ...
+     """
+     Create an InferencePipelineConfig instance from API response data.
+     """
+
+     def to_dict(self: Any) -> Dict: ...
+     """
+     Convert the inference pipeline config to a dictionary for API calls.
+     """
+
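A minimal construction sketch for InferencePipelineConfig, assuming keyword arguments matching the Attributes list; only name, description, and applications appear in the manager example below, and the remaining attributes (id, status, timestamps) look server-assigned:

```python
config = InferencePipelineConfig(
    name="Entrance Analytics",
    description="People counting at the main entrance",
    applications=[
        ApplicationDeployment(
            application_id="664ab1df23abcf1c33123456",  # placeholder ObjectID
            application_version="v1.0",
        )
    ],
)

# to_dict() builds the payload that save()/update() send to the backend;
# from_dict() inverts it when parsing API responses.
restored = InferencePipelineConfig.from_dict(config.to_dict())
```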
+ class InferencePipelineManager:
+     """
+     Manager for inference pipeline operations.
+
+     This class provides methods to create, list, and manage multiple inference pipelines
+     within a project. It handles the overall management of inference pipelines while
+     individual pipelines are managed through the InferencePipeline class.
+
+     Example:
+         Managing multiple inference pipelines:
+         ```python
+         from matrice import Session
+         from matrice_inference.deployment.inference_pipeline import InferencePipelineManager, InferencePipelineConfig, ApplicationDeployment
+
+         session = Session(account_number="...", access_key="...", secret_key="...")
+         manager = InferencePipelineManager(session)
+
+         # Create a new pipeline
+         apps = [
+             ApplicationDeployment(
+                 application_id="664ab1df23abcf1c33123456",
+                 application_version="v1.0"
+             )
+         ]
+
+         config = InferencePipelineConfig(
+             name="Multi-App Pipeline",
+             description="Pipeline for multiple applications",
+             applications=apps
+         )
+
+         pipeline, error, message = manager.create_inference_pipeline(config)
+         if not error:
+             print(f"Created pipeline: {pipeline.id}")
+
+         # List all pipelines
+         pipelines, error, message = manager.get_inference_pipelines()
+         if not error:
+             print(f"Found {len(pipelines)} pipelines")
+         ```
+     """
+
+     def __init__(self: Any, session: Any, project_id: str = None) -> None: ...
+     """
+     Initialize the InferencePipelineManager.
+
+     Args:
+         session: Session object containing RPC client for API communication
+         project_id: The ID of the project (optional, can be inferred from session)
+     """
+
+     def create_inference_pipeline(self: Any, config: Any, project_id: str = None) -> Tuple[Optional['InferencePipeline'], Optional[str], str]: ...
+     """
+     Create a new inference pipeline.
+
+     Args:
+         config: InferencePipelineConfig object containing the pipeline configuration
+         project_id: The ID of the project (optional, uses manager's project_id if not provided)
+
+     Returns:
+         tuple: (inference_pipeline_instance, error, message)
+     """
+
+     def get_inference_pipeline_by_id(self: Any, pipeline_id: str) -> Tuple[Optional['InferencePipeline'], Optional[str], str]: ...
+     """
+     Get an inference pipeline by its ID.
+
+     Args:
+         pipeline_id: The ID of the inference pipeline to retrieve
+
+     Returns:
+         tuple: (inference_pipeline_instance, error, message)
+     """
+
+     def get_inference_pipelines(self: Any, page: int = 1, limit: int = 10, search: str = None, project_id: str = None) -> Tuple[Optional[List['InferencePipeline']], Optional[str], str]: ...
+     """
+     Get all inference pipelines for a project.
+
+     Args:
+         page: Page number for pagination
+         limit: Items per page
+         search: Optional search term
+         project_id: The ID of the project (optional, uses manager's project_id if not provided)
+
+     Returns:
+         tuple: (inference_pipeline_instances, error, message)
+     """
+
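Taken together, the manager and pipeline classes support a load, start, wait lifecycle plus paginated listing. A sketch under the same assumptions as the docstring examples (credentials and IDs are placeholders, and attribute-style access to p.id and p.status mirrors the examples above):

```python
from matrice import Session
from matrice_inference.deployment.inference_pipeline import InferencePipelineManager

session = Session(account_number="...", access_key="...", secret_key="...")
manager = InferencePipelineManager(session, project_id="...")

pipeline, error, message = manager.get_inference_pipeline_by_id("664ab1df23abcf1c33123456")
if error:
    raise RuntimeError(message)

result, error, message = pipeline.start()
if error:
    raise RuntimeError(message)

# Poll every 10 s for up to 5 min until the backend reports 'active'.
is_active, error, message = pipeline.wait_for_active(timeout=300, poll_interval=10)
if not is_active:
    pipeline.stop(force=True)  # give up and release resources

# Page through every pipeline in the project (defaults: page=1, limit=10).
page = 1
while True:
    pipelines, error, _ = manager.get_inference_pipelines(page=page, limit=10)
    if error or not pipelines:
        break
    for p in pipelines:
        print(p.id, p.status)
    page += 1
```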