langgraph-api 0.4.1__py3-none-any.whl → 0.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. langgraph_api/__init__.py +1 -1
  2. langgraph_api/api/__init__.py +111 -51
  3. langgraph_api/api/a2a.py +1610 -0
  4. langgraph_api/api/assistants.py +212 -89
  5. langgraph_api/api/mcp.py +3 -3
  6. langgraph_api/api/meta.py +52 -28
  7. langgraph_api/api/openapi.py +27 -17
  8. langgraph_api/api/profile.py +108 -0
  9. langgraph_api/api/runs.py +342 -195
  10. langgraph_api/api/store.py +19 -2
  11. langgraph_api/api/threads.py +209 -27
  12. langgraph_api/asgi_transport.py +14 -9
  13. langgraph_api/asyncio.py +14 -4
  14. langgraph_api/auth/custom.py +52 -37
  15. langgraph_api/auth/langsmith/backend.py +4 -3
  16. langgraph_api/auth/langsmith/client.py +13 -8
  17. langgraph_api/cli.py +230 -133
  18. langgraph_api/command.py +5 -3
  19. langgraph_api/config/__init__.py +532 -0
  20. langgraph_api/config/_parse.py +58 -0
  21. langgraph_api/config/schemas.py +431 -0
  22. langgraph_api/cron_scheduler.py +17 -1
  23. langgraph_api/encryption/__init__.py +15 -0
  24. langgraph_api/encryption/aes_json.py +158 -0
  25. langgraph_api/encryption/context.py +35 -0
  26. langgraph_api/encryption/custom.py +280 -0
  27. langgraph_api/encryption/middleware.py +632 -0
  28. langgraph_api/encryption/shared.py +63 -0
  29. langgraph_api/errors.py +12 -1
  30. langgraph_api/executor_entrypoint.py +11 -6
  31. langgraph_api/feature_flags.py +29 -0
  32. langgraph_api/graph.py +176 -76
  33. langgraph_api/grpc/client.py +313 -0
  34. langgraph_api/grpc/config_conversion.py +231 -0
  35. langgraph_api/grpc/generated/__init__.py +29 -0
  36. langgraph_api/grpc/generated/checkpointer_pb2.py +63 -0
  37. langgraph_api/grpc/generated/checkpointer_pb2.pyi +99 -0
  38. langgraph_api/grpc/generated/checkpointer_pb2_grpc.py +329 -0
  39. langgraph_api/grpc/generated/core_api_pb2.py +216 -0
  40. langgraph_api/grpc/generated/core_api_pb2.pyi +905 -0
  41. langgraph_api/grpc/generated/core_api_pb2_grpc.py +1621 -0
  42. langgraph_api/grpc/generated/engine_common_pb2.py +219 -0
  43. langgraph_api/grpc/generated/engine_common_pb2.pyi +722 -0
  44. langgraph_api/grpc/generated/engine_common_pb2_grpc.py +24 -0
  45. langgraph_api/grpc/generated/enum_cancel_run_action_pb2.py +37 -0
  46. langgraph_api/grpc/generated/enum_cancel_run_action_pb2.pyi +12 -0
  47. langgraph_api/grpc/generated/enum_cancel_run_action_pb2_grpc.py +24 -0
  48. langgraph_api/grpc/generated/enum_control_signal_pb2.py +37 -0
  49. langgraph_api/grpc/generated/enum_control_signal_pb2.pyi +16 -0
  50. langgraph_api/grpc/generated/enum_control_signal_pb2_grpc.py +24 -0
  51. langgraph_api/grpc/generated/enum_durability_pb2.py +37 -0
  52. langgraph_api/grpc/generated/enum_durability_pb2.pyi +16 -0
  53. langgraph_api/grpc/generated/enum_durability_pb2_grpc.py +24 -0
  54. langgraph_api/grpc/generated/enum_multitask_strategy_pb2.py +37 -0
  55. langgraph_api/grpc/generated/enum_multitask_strategy_pb2.pyi +16 -0
  56. langgraph_api/grpc/generated/enum_multitask_strategy_pb2_grpc.py +24 -0
  57. langgraph_api/grpc/generated/enum_run_status_pb2.py +37 -0
  58. langgraph_api/grpc/generated/enum_run_status_pb2.pyi +22 -0
  59. langgraph_api/grpc/generated/enum_run_status_pb2_grpc.py +24 -0
  60. langgraph_api/grpc/generated/enum_stream_mode_pb2.py +37 -0
  61. langgraph_api/grpc/generated/enum_stream_mode_pb2.pyi +28 -0
  62. langgraph_api/grpc/generated/enum_stream_mode_pb2_grpc.py +24 -0
  63. langgraph_api/grpc/generated/enum_thread_status_pb2.py +37 -0
  64. langgraph_api/grpc/generated/enum_thread_status_pb2.pyi +16 -0
  65. langgraph_api/grpc/generated/enum_thread_status_pb2_grpc.py +24 -0
  66. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2.py +37 -0
  67. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2.pyi +16 -0
  68. langgraph_api/grpc/generated/enum_thread_stream_mode_pb2_grpc.py +24 -0
  69. langgraph_api/grpc/generated/errors_pb2.py +39 -0
  70. langgraph_api/grpc/generated/errors_pb2.pyi +21 -0
  71. langgraph_api/grpc/generated/errors_pb2_grpc.py +24 -0
  72. langgraph_api/grpc/ops/__init__.py +370 -0
  73. langgraph_api/grpc/ops/assistants.py +424 -0
  74. langgraph_api/grpc/ops/runs.py +792 -0
  75. langgraph_api/grpc/ops/threads.py +1013 -0
  76. langgraph_api/http.py +16 -5
  77. langgraph_api/http_metrics.py +15 -35
  78. langgraph_api/http_metrics_utils.py +38 -0
  79. langgraph_api/js/build.mts +1 -1
  80. langgraph_api/js/client.http.mts +13 -7
  81. langgraph_api/js/client.mts +2 -5
  82. langgraph_api/js/package.json +29 -28
  83. langgraph_api/js/remote.py +56 -30
  84. langgraph_api/js/src/graph.mts +20 -0
  85. langgraph_api/js/sse.py +2 -2
  86. langgraph_api/js/ui.py +1 -1
  87. langgraph_api/js/yarn.lock +1204 -1006
  88. langgraph_api/logging.py +29 -2
  89. langgraph_api/metadata.py +99 -28
  90. langgraph_api/middleware/http_logger.py +7 -2
  91. langgraph_api/middleware/private_network.py +7 -7
  92. langgraph_api/models/run.py +54 -93
  93. langgraph_api/otel_context.py +205 -0
  94. langgraph_api/patch.py +5 -3
  95. langgraph_api/queue_entrypoint.py +154 -65
  96. langgraph_api/route.py +47 -5
  97. langgraph_api/schema.py +88 -10
  98. langgraph_api/self_hosted_logs.py +124 -0
  99. langgraph_api/self_hosted_metrics.py +450 -0
  100. langgraph_api/serde.py +79 -37
  101. langgraph_api/server.py +138 -60
  102. langgraph_api/state.py +4 -3
  103. langgraph_api/store.py +25 -16
  104. langgraph_api/stream.py +80 -29
  105. langgraph_api/thread_ttl.py +31 -13
  106. langgraph_api/timing/__init__.py +25 -0
  107. langgraph_api/timing/profiler.py +200 -0
  108. langgraph_api/timing/timer.py +318 -0
  109. langgraph_api/utils/__init__.py +53 -8
  110. langgraph_api/utils/cache.py +47 -10
  111. langgraph_api/utils/config.py +2 -1
  112. langgraph_api/utils/errors.py +77 -0
  113. langgraph_api/utils/future.py +10 -6
  114. langgraph_api/utils/headers.py +76 -2
  115. langgraph_api/utils/retriable_client.py +74 -0
  116. langgraph_api/utils/stream_codec.py +315 -0
  117. langgraph_api/utils/uuids.py +29 -62
  118. langgraph_api/validation.py +9 -0
  119. langgraph_api/webhook.py +120 -6
  120. langgraph_api/worker.py +55 -24
  121. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/METADATA +16 -8
  122. langgraph_api-0.7.3.dist-info/RECORD +168 -0
  123. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/WHEEL +1 -1
  124. langgraph_runtime/__init__.py +1 -0
  125. langgraph_runtime/routes.py +11 -0
  126. logging.json +1 -3
  127. openapi.json +839 -478
  128. langgraph_api/config.py +0 -387
  129. langgraph_api/js/isolate-0x130008000-46649-46649-v8.log +0 -4430
  130. langgraph_api/js/isolate-0x138008000-44681-44681-v8.log +0 -4430
  131. langgraph_api/js/package-lock.json +0 -3308
  132. langgraph_api-0.4.1.dist-info/RECORD +0 -107
  133. /langgraph_api/{utils.py → grpc/__init__.py} +0 -0
  134. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/entry_points.txt +0 -0
  135. {langgraph_api-0.4.1.dist-info → langgraph_api-0.7.3.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ langgraph_api/grpc/ops/assistants.py
@@ -0,0 +1,424 @@
+"""gRPC-based assistants operations."""
+
+from __future__ import annotations
+
+from datetime import UTC
+from typing import TYPE_CHECKING, Any
+from uuid import UUID
+
+from langgraph_api.grpc import config_conversion
+from langgraph_api.grpc.client import get_shared_client
+from langgraph_api.grpc.generated import core_api_pb2 as pb
+from langgraph_api.grpc.ops import (
+    Authenticated,
+    _map_sort_order,
+    consolidate_config_and_context,
+    grpc_error_guard,
+    map_if_exists,
+)
+from langgraph_api.schema import (
+    Assistant,
+    AssistantSelectField,
+    Config,
+    Context,
+    MetadataInput,
+    OnConflictBehavior,
+)
+from langgraph_api.serde import json_dumpb_optional, json_loads_optional
+
+if TYPE_CHECKING:
+    from collections.abc import AsyncIterator
+
+
+def proto_to_assistant(proto_assistant: pb.Assistant) -> Assistant:
+    """Convert protobuf Assistant to dictionary format."""
+    # Preserve None for optional scalar fields by checking presence via HasField
+    description = (
+        proto_assistant.description if proto_assistant.HasField("description") else None
+    )
+    return {
+        "assistant_id": proto_assistant.assistant_id,
+        "graph_id": proto_assistant.graph_id,
+        "version": proto_assistant.version,
+        "created_at": proto_assistant.created_at.ToDatetime(tzinfo=UTC),
+        "updated_at": proto_assistant.updated_at.ToDatetime(tzinfo=UTC),
+        "config": config_conversion.config_from_proto(proto_assistant.config),
+        "context": json_loads_optional(proto_assistant.context_json),
+        "metadata": json_loads_optional(proto_assistant.metadata_json),
+        "name": proto_assistant.name,
+        "description": description,
+    }
+
+
+def _map_sort_by(sort_by: str | None) -> pb.AssistantsSortBy:
+    """Map string sort_by to protobuf enum."""
+    if not sort_by:
+        return pb.AssistantsSortBy.CREATED_AT
+
+    sort_by_lower = sort_by.lower()
+    mapping = {
+        "assistant_id": pb.AssistantsSortBy.ASSISTANT_ID,
+        "graph_id": pb.AssistantsSortBy.GRAPH_ID,
+        "name": pb.AssistantsSortBy.NAME,
+        "created_at": pb.AssistantsSortBy.CREATED_AT,
+        "updated_at": pb.AssistantsSortBy.UPDATED_AT,
+    }
+    return mapping.get(sort_by_lower, pb.AssistantsSortBy.CREATED_AT)
+
+
+@grpc_error_guard
+class Assistants(Authenticated):
+    """gRPC-based assistants operations."""
+
+    resource = "assistants"
+
+    @staticmethod
+    async def search(
+        conn,  # Not used in gRPC implementation
+        *,
+        graph_id: str | None,
+        name: str | None,
+        metadata: MetadataInput,
+        limit: int,
+        offset: int,
+        sort_by: str | None = None,
+        sort_order: str | None = None,
+        select: list[AssistantSelectField] | None = None,
+        ctx: Any = None,
+    ) -> tuple[AsyncIterator[Assistant], int | None]:  # type: ignore[return-value]
+        """Search assistants via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx,
+            "search",
+            {
+                "graph_id": graph_id,
+                "metadata": metadata,
+                "limit": limit,
+                "offset": offset,
+            },
+        )
+
+        # Build the gRPC request
+        request = pb.SearchAssistantsRequest(
+            filters=auth_filters,
+            graph_id=graph_id,
+            metadata_json=json_dumpb_optional(metadata),
+            limit=limit,
+            offset=offset,
+            sort_by=_map_sort_by(sort_by),
+            sort_order=_map_sort_order(sort_order),
+            select=select,
+            name=name,
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.Search(request)
+
+        # Convert response to expected format
+        assistants = [
+            proto_to_assistant(assistant) for assistant in response.assistants
+        ]
+
+        # Determine if there are more results
+        # Note: gRPC doesn't return cursor info, so we estimate based on result count
+        cursor = offset + limit if len(assistants) == limit else None
+
+        async def generate_results():
+            for assistant in assistants:
+                yield {
+                    k: v for k, v in assistant.items() if select is None or k in select
+                }
+
+        return generate_results(), cursor
+
+    @staticmethod
+    async def get(
+        conn,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        ctx: Any = None,
+    ) -> AsyncIterator[Assistant]:  # type: ignore[return-value]
+        """Get assistant by ID via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx, "read", {"assistant_id": str(assistant_id)}
+        )
+
+        # Build the gRPC request
+        request = pb.GetAssistantRequest(
+            assistant_id=str(assistant_id),
+            filters=auth_filters,
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.Get(request)
+
+        # Convert and yield the result
+        assistant = proto_to_assistant(response)
+
+        async def generate_result():
+            yield assistant
+
+        return generate_result()
+
+    @staticmethod
+    async def put(
+        conn,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        *,
+        graph_id: str,
+        config: Config,
+        context: Context,
+        metadata: MetadataInput,
+        if_exists: OnConflictBehavior,
+        name: str,
+        description: str | None = None,
+        ctx: Any = None,
+    ) -> AsyncIterator[Assistant]:  # type: ignore[return-value]
+        """Create/update assistant via gRPC."""
+        context = context or {}
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx,
+            "create",
+            {
+                "assistant_id": str(assistant_id),
+                "graph_id": graph_id,
+                "config": config,
+                "context": context,
+                "metadata": metadata,
+                "name": name,
+                "description": description,
+            },
+        )
+
+        config, context = consolidate_config_and_context(config, context)
+
+        on_conflict = map_if_exists(if_exists)
+
+        # Build the gRPC request
+        request = pb.CreateAssistantRequest(
+            assistant_id=str(assistant_id),
+            graph_id=graph_id,
+            filters=auth_filters,
+            if_exists=on_conflict,
+            config=config_conversion.config_to_proto(config),
+            context_json=json_dumpb_optional(context),
+            metadata_json=json_dumpb_optional(metadata),
+            name=name,
+            description=description,
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.Create(request)
+
+        # Convert and yield the result
+        assistant = proto_to_assistant(response)
+
+        async def generate_result():
+            yield assistant
+
+        return generate_result()
+
+    @staticmethod
+    async def patch(
+        conn,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        *,
+        config: Config | None = None,
+        context: Context | None = None,
+        graph_id: str | None = None,
+        metadata: MetadataInput | None = None,
+        name: str | None = None,
+        description: str | None = None,
+        ctx: Any = None,
+    ) -> AsyncIterator[Assistant]:  # type: ignore[return-value]
+        """Update assistant via gRPC."""
+        metadata = metadata if metadata is not None else {}
+        config = config if config is not None else Config()
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx,
+            "update",
+            {
+                "assistant_id": str(assistant_id),
+                "graph_id": graph_id,
+                "config": config,
+                "context": context,
+                "metadata": metadata,
+                "name": name,
+                "description": description,
+            },
+        )
+
+        config, context = consolidate_config_and_context(config, context)
+
+        # Build the gRPC request
+        request = pb.PatchAssistantRequest(
+            assistant_id=str(assistant_id),
+            filters=auth_filters,
+            graph_id=graph_id,
+            context_json=json_dumpb_optional(context),
+            metadata_json=json_dumpb_optional(metadata),
+            name=name,
+            description=description,
+        )
+
+        # Add optional config if provided
+        if config:
+            request.config.CopyFrom(config_conversion.config_to_proto(config))
+
+        client = await get_shared_client()
+        response = await client.assistants.Patch(request)
+
+        # Convert and yield the result
+        assistant = proto_to_assistant(response)
+
+        async def generate_result():
+            yield assistant
+
+        return generate_result()
+
+    @staticmethod
+    async def delete(
+        conn: Any,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        ctx: Any = None,
+        *,
+        delete_threads: bool = False,
+    ) -> AsyncIterator[UUID]:  # type: ignore[return-value]
+        """Delete assistant via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx, "delete", {"assistant_id": str(assistant_id)}
+        )
+
+        # Build the gRPC request
+        request = pb.DeleteAssistantRequest(
+            assistant_id=str(assistant_id),
+            filters=auth_filters,
+            delete_threads=delete_threads,
+        )
+
+        client = await get_shared_client()
+        await client.assistants.Delete(request)
+
+        # Return the deleted ID
+        async def generate_result():
+            yield UUID(str(assistant_id))
+
+        return generate_result()
+
+    @staticmethod
+    async def set_latest(
+        conn,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        version: int,
+        ctx: Any = None,
+    ) -> AsyncIterator[Assistant]:  # type: ignore[return-value]
+        """Set latest version of assistant via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx,
+            "update",
+            {
+                "assistant_id": str(assistant_id),
+                "version": version,
+            },
+        )
+
+        # Build the gRPC request
+        request = pb.SetLatestAssistantRequest(
+            assistant_id=str(assistant_id),
+            version=version,
+            filters=auth_filters,
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.SetLatest(request)
+
+        # Convert and yield the result
+        assistant = proto_to_assistant(response)
+
+        async def generate_result():
+            yield assistant
+
+        return generate_result()
+
+    @staticmethod
+    async def get_versions(
+        conn,  # Not used in gRPC implementation
+        assistant_id: UUID | str,
+        metadata: MetadataInput,
+        limit: int,
+        offset: int,
+        ctx: Any = None,
+    ) -> AsyncIterator[Assistant]:  # type: ignore[return-value]
+        """Get all versions of assistant via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx,
+            "search",
+            {"assistant_id": str(assistant_id), "metadata": metadata},
+        )
+
+        # Build the gRPC request
+        request = pb.GetAssistantVersionsRequest(
+            assistant_id=str(assistant_id),
+            filters=auth_filters,
+            metadata_json=json_dumpb_optional(metadata),
+            limit=limit,
+            offset=offset,
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.GetVersions(request)
+
+        # Convert and yield the results
+        async def generate_results():
+            for version in response.versions:
+                # Preserve None for optional scalar fields by checking presence
+                version_description = (
+                    version.description if version.HasField("description") else None
+                )
+                yield {
+                    "assistant_id": version.assistant_id,
+                    "graph_id": version.graph_id,
+                    "version": version.version,
+                    "created_at": version.created_at.ToDatetime(tzinfo=UTC),
+                    "config": config_conversion.config_from_proto(version.config),
+                    "context": json_loads_optional(version.context_json),
+                    "metadata": json_loads_optional(version.metadata_json),
+                    "name": version.name,
+                    "description": version_description,
+                }
+
+        return generate_results()
+
+    @staticmethod
+    async def count(
+        conn,  # Not used in gRPC implementation
+        *,
+        graph_id: str | None = None,
+        name: str | None = None,
+        metadata: MetadataInput = None,
+        ctx: Any = None,
+    ) -> int:  # type: ignore[return-value]
+        """Count assistants via gRPC."""
+        # Handle auth filters
+        auth_filters = await Assistants.handle_event(
+            ctx, "search", {"graph_id": graph_id, "metadata": metadata}
+        )
+
+        # Build the gRPC request
+        request = pb.CountAssistantsRequest(
+            filters=auth_filters,
+            graph_id=graph_id,
+            name=name,
+            metadata_json=json_dumpb_optional(metadata),
+        )
+
+        client = await get_shared_client()
+        response = await client.assistants.Count(request)
+
+        return int(response.count)
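
For orientation, below is a minimal usage sketch (not part of the package) showing how the new gRPC-backed Assistants ops could be driven from caller code. It assumes a reachable gRPC backend behind get_shared_client(), that passing ctx=None and metadata=None is acceptable to handle_event and the request builders, and it uses a placeholder assistant ID; the conn argument is unused by this implementation, so None is passed.

# Hedged sketch, not the server's actual call path.
import asyncio

from langgraph_api.grpc.ops.assistants import Assistants


async def main() -> None:
    # search() returns (async iterator of assistant dicts, next offset or None).
    results, cursor = await Assistants.search(
        None,            # conn is unused in the gRPC implementation
        graph_id=None,
        name=None,
        metadata=None,   # assumption: None metadata is accepted
        limit=10,
        offset=0,
    )
    async for assistant in results:
        print(assistant["assistant_id"], assistant.get("name"))
    print("next offset:", cursor)

    # get() also returns an async iterator that yields a single assistant dict.
    # "00000000-0000-0000-0000-000000000000" is a placeholder ID.
    async for assistant in await Assistants.get(
        None, "00000000-0000-0000-0000-000000000000"
    ):
        print(assistant["graph_id"])


asyncio.run(main())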