letta-nightly 0.11.7.dev20251006104136__py3-none-any.whl → 0.11.7.dev20251008104128__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. letta/adapters/letta_llm_adapter.py +1 -0
  2. letta/adapters/letta_llm_request_adapter.py +0 -1
  3. letta/adapters/letta_llm_stream_adapter.py +7 -2
  4. letta/adapters/simple_llm_request_adapter.py +88 -0
  5. letta/adapters/simple_llm_stream_adapter.py +192 -0
  6. letta/agents/agent_loop.py +6 -0
  7. letta/agents/ephemeral_summary_agent.py +2 -1
  8. letta/agents/helpers.py +142 -6
  9. letta/agents/letta_agent.py +13 -33
  10. letta/agents/letta_agent_batch.py +2 -4
  11. letta/agents/letta_agent_v2.py +87 -77
  12. letta/agents/letta_agent_v3.py +899 -0
  13. letta/agents/voice_agent.py +2 -6
  14. letta/constants.py +8 -4
  15. letta/errors.py +40 -0
  16. letta/functions/function_sets/base.py +84 -4
  17. letta/functions/function_sets/multi_agent.py +0 -3
  18. letta/functions/schema_generator.py +113 -71
  19. letta/groups/dynamic_multi_agent.py +3 -2
  20. letta/groups/helpers.py +1 -2
  21. letta/groups/round_robin_multi_agent.py +3 -2
  22. letta/groups/sleeptime_multi_agent.py +3 -2
  23. letta/groups/sleeptime_multi_agent_v2.py +1 -1
  24. letta/groups/sleeptime_multi_agent_v3.py +17 -17
  25. letta/groups/supervisor_multi_agent.py +84 -80
  26. letta/helpers/converters.py +3 -0
  27. letta/helpers/message_helper.py +4 -0
  28. letta/helpers/tool_rule_solver.py +92 -5
  29. letta/interfaces/anthropic_streaming_interface.py +409 -0
  30. letta/interfaces/gemini_streaming_interface.py +296 -0
  31. letta/interfaces/openai_streaming_interface.py +752 -1
  32. letta/llm_api/anthropic_client.py +126 -16
  33. letta/llm_api/bedrock_client.py +4 -2
  34. letta/llm_api/deepseek_client.py +4 -1
  35. letta/llm_api/google_vertex_client.py +123 -42
  36. letta/llm_api/groq_client.py +4 -1
  37. letta/llm_api/llm_api_tools.py +11 -4
  38. letta/llm_api/llm_client_base.py +6 -2
  39. letta/llm_api/openai.py +32 -2
  40. letta/llm_api/openai_client.py +423 -18
  41. letta/llm_api/xai_client.py +4 -1
  42. letta/main.py +9 -5
  43. letta/memory.py +1 -0
  44. letta/orm/__init__.py +1 -1
  45. letta/orm/agent.py +10 -0
  46. letta/orm/block.py +7 -16
  47. letta/orm/blocks_agents.py +8 -2
  48. letta/orm/files_agents.py +2 -0
  49. letta/orm/job.py +7 -5
  50. letta/orm/mcp_oauth.py +1 -0
  51. letta/orm/message.py +21 -6
  52. letta/orm/organization.py +2 -0
  53. letta/orm/provider.py +6 -2
  54. letta/orm/run.py +71 -0
  55. letta/orm/sandbox_config.py +7 -1
  56. letta/orm/sqlalchemy_base.py +0 -306
  57. letta/orm/step.py +6 -5
  58. letta/orm/step_metrics.py +5 -5
  59. letta/otel/tracing.py +28 -3
  60. letta/plugins/defaults.py +4 -4
  61. letta/prompts/system_prompts/__init__.py +2 -0
  62. letta/prompts/system_prompts/letta_v1.py +25 -0
  63. letta/schemas/agent.py +3 -2
  64. letta/schemas/agent_file.py +9 -3
  65. letta/schemas/block.py +23 -10
  66. letta/schemas/enums.py +21 -2
  67. letta/schemas/job.py +17 -4
  68. letta/schemas/letta_message_content.py +71 -2
  69. letta/schemas/letta_stop_reason.py +5 -5
  70. letta/schemas/llm_config.py +53 -3
  71. letta/schemas/memory.py +1 -1
  72. letta/schemas/message.py +504 -117
  73. letta/schemas/openai/responses_request.py +64 -0
  74. letta/schemas/providers/__init__.py +2 -0
  75. letta/schemas/providers/anthropic.py +16 -0
  76. letta/schemas/providers/ollama.py +115 -33
  77. letta/schemas/providers/openrouter.py +52 -0
  78. letta/schemas/providers/vllm.py +2 -1
  79. letta/schemas/run.py +48 -42
  80. letta/schemas/step.py +2 -2
  81. letta/schemas/step_metrics.py +1 -1
  82. letta/schemas/tool.py +15 -107
  83. letta/schemas/tool_rule.py +88 -5
  84. letta/serialize_schemas/marshmallow_agent.py +1 -0
  85. letta/server/db.py +86 -408
  86. letta/server/rest_api/app.py +61 -10
  87. letta/server/rest_api/dependencies.py +14 -0
  88. letta/server/rest_api/redis_stream_manager.py +19 -8
  89. letta/server/rest_api/routers/v1/agents.py +364 -292
  90. letta/server/rest_api/routers/v1/blocks.py +14 -20
  91. letta/server/rest_api/routers/v1/identities.py +45 -110
  92. letta/server/rest_api/routers/v1/internal_templates.py +21 -0
  93. letta/server/rest_api/routers/v1/jobs.py +23 -6
  94. letta/server/rest_api/routers/v1/messages.py +1 -1
  95. letta/server/rest_api/routers/v1/runs.py +126 -85
  96. letta/server/rest_api/routers/v1/sandbox_configs.py +10 -19
  97. letta/server/rest_api/routers/v1/tools.py +281 -594
  98. letta/server/rest_api/routers/v1/voice.py +1 -1
  99. letta/server/rest_api/streaming_response.py +29 -29
  100. letta/server/rest_api/utils.py +122 -64
  101. letta/server/server.py +160 -887
  102. letta/services/agent_manager.py +236 -919
  103. letta/services/agent_serialization_manager.py +16 -0
  104. letta/services/archive_manager.py +0 -100
  105. letta/services/block_manager.py +211 -168
  106. letta/services/file_manager.py +1 -1
  107. letta/services/files_agents_manager.py +24 -33
  108. letta/services/group_manager.py +0 -142
  109. letta/services/helpers/agent_manager_helper.py +7 -2
  110. letta/services/helpers/run_manager_helper.py +85 -0
  111. letta/services/job_manager.py +96 -411
  112. letta/services/lettuce/__init__.py +6 -0
  113. letta/services/lettuce/lettuce_client_base.py +86 -0
  114. letta/services/mcp_manager.py +38 -6
  115. letta/services/message_manager.py +165 -362
  116. letta/services/organization_manager.py +0 -36
  117. letta/services/passage_manager.py +0 -345
  118. letta/services/provider_manager.py +0 -80
  119. letta/services/run_manager.py +301 -0
  120. letta/services/sandbox_config_manager.py +0 -234
  121. letta/services/step_manager.py +62 -39
  122. letta/services/summarizer/summarizer.py +9 -7
  123. letta/services/telemetry_manager.py +0 -16
  124. letta/services/tool_executor/builtin_tool_executor.py +35 -0
  125. letta/services/tool_executor/core_tool_executor.py +397 -2
  126. letta/services/tool_executor/files_tool_executor.py +3 -3
  127. letta/services/tool_executor/multi_agent_tool_executor.py +30 -15
  128. letta/services/tool_executor/tool_execution_manager.py +6 -8
  129. letta/services/tool_executor/tool_executor_base.py +3 -3
  130. letta/services/tool_manager.py +85 -339
  131. letta/services/tool_sandbox/base.py +24 -13
  132. letta/services/tool_sandbox/e2b_sandbox.py +16 -1
  133. letta/services/tool_schema_generator.py +123 -0
  134. letta/services/user_manager.py +0 -99
  135. letta/settings.py +20 -4
  136. {letta_nightly-0.11.7.dev20251006104136.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/METADATA +3 -5
  137. {letta_nightly-0.11.7.dev20251006104136.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/RECORD +140 -132
  138. letta/agents/temporal/activities/__init__.py +0 -4
  139. letta/agents/temporal/activities/example_activity.py +0 -7
  140. letta/agents/temporal/activities/prepare_messages.py +0 -10
  141. letta/agents/temporal/temporal_agent_workflow.py +0 -56
  142. letta/agents/temporal/types.py +0 -25
  143. {letta_nightly-0.11.7.dev20251006104136.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/WHEEL +0 -0
  144. {letta_nightly-0.11.7.dev20251006104136.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/entry_points.txt +0 -0
  145. {letta_nightly-0.11.7.dev20251006104136.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/licenses/LICENSE +0 -0
letta/services/step_manager.py
@@ -8,7 +8,6 @@ from sqlalchemy.orm import Session
 
 from letta.helpers.singleton import singleton
 from letta.orm.errors import NoResultFound
-from letta.orm.job import Job as JobModel
 from letta.orm.message import Message as MessageModel
 from letta.orm.sqlalchemy_base import AccessType
 from letta.orm.step import Step as StepModel
@@ -48,6 +47,7 @@ class StepManager:
         feedback: Optional[Literal["positive", "negative"]] = None,
         has_feedback: Optional[bool] = None,
         project_id: Optional[str] = None,
+        run_id: Optional[str] = None,
     ) -> List[PydanticStep]:
         """List all jobs with optional pagination and status filter."""
         async with db_registry.async_session() as session:
@@ -62,6 +62,8 @@
                 filter_kwargs["feedback"] = feedback
             if project_id:
                 filter_kwargs["project_id"] = project_id
+            if run_id:
+                filter_kwargs["run_id"] = run_id
             steps = await StepModel.list_async(
                 db_session=session,
                 before=before,
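Note: the listing API above gains a run_id filter. A minimal caller-side sketch follows; the method name list_steps_async and the surrounding step_manager/actor objects are assumptions for illustration, and only the run_id keyword comes from this diff.

# Hypothetical usage sketch; only the run_id filter is taken from the diff above.
steps = await step_manager.list_steps_async(   # method name assumed, not shown in the hunk
    actor=actor,                               # PydanticUser performing the request
    run_id="run-1234",                         # new: restrict results to steps logged under this run
)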
@@ -88,7 +90,7 @@
         context_window_limit: int,
         usage: UsageStatistics,
         provider_id: Optional[str] = None,
-        job_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
         stop_reason: Optional[LettaStopReason] = None,
@@ -109,7 +111,7 @@
             "completion_tokens": usage.completion_tokens,
             "prompt_tokens": usage.prompt_tokens,
             "total_tokens": usage.total_tokens,
-            "job_id": job_id,
+            "run_id": run_id,
             "tags": [],
             "tid": None,
             "trace_id": get_trace_id(),  # Get the current trace ID
@@ -123,8 +125,8 @@
         if stop_reason:
             step_data["stop_reason"] = stop_reason.stop_reason
         with db_registry.session() as session:
-            if job_id:
-                self._verify_job_access(session, job_id, actor, access=["write"])
+            if run_id:
+                self._verify_run_access(session, run_id, actor, access=["write"])
             new_step = StepModel(**step_data)
             new_step.create(session)
             return new_step.to_pydantic()
@@ -142,13 +144,14 @@
         context_window_limit: int,
         usage: UsageStatistics,
         provider_id: Optional[str] = None,
-        job_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
         stop_reason: Optional[LettaStopReason] = None,
         status: Optional[StepStatus] = None,
         error_type: Optional[str] = None,
         error_data: Optional[Dict] = None,
+        allow_partial: Optional[bool] = False,
     ) -> PydanticStep:
         step_data = {
             "origin": None,
@@ -163,7 +166,7 @@
             "completion_tokens": usage.completion_tokens,
             "prompt_tokens": usage.prompt_tokens,
             "total_tokens": usage.total_tokens,
-            "job_id": job_id,
+            "run_id": run_id,
             "tags": [],
             "tid": None,
             "trace_id": get_trace_id(),  # Get the current trace ID
@@ -176,7 +179,15 @@
             step_data["id"] = step_id
         if stop_reason:
             step_data["stop_reason"] = stop_reason.stop_reason
+
         async with db_registry.async_session() as session:
+            if allow_partial:
+                try:
+                    new_step = await StepModel.read_async(db_session=session, identifier=step_id, actor=actor)
+                    return new_step.to_pydantic()
+                except NoResultFound:
+                    pass
+
             new_step = StepModel(**step_data)
             await new_step.create_async(session, no_commit=True, no_refresh=True)
             pydantic_step = new_step.to_pydantic()
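The new allow_partial flag turns step logging into a read-before-create: if a row with the caller-supplied step_id already exists, it is returned as-is rather than re-inserted. A generic sketch of that pattern, using plain SQLAlchemy async primitives rather than the letta models:

# Generic read-before-create sketch (SQLAlchemy AsyncSession assumed); mirrors the allow_partial branch above.
from sqlalchemy.ext.asyncio import AsyncSession

async def create_or_return_existing(session: AsyncSession, model, row_id: str, data: dict, allow_partial: bool = False):
    if allow_partial:
        existing = await session.get(model, row_id)  # cheap primary-key lookup
        if existing is not None:
            return existing                          # retry/duplicate call: reuse the row already written
    obj = model(**data)
    session.add(obj)
    await session.flush()                            # surface integrity errors without committing
    return obj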
@@ -420,10 +431,11 @@
         tool_execution_ns: Optional[int] = None,
         step_ns: Optional[int] = None,
         agent_id: Optional[str] = None,
-        job_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         project_id: Optional[str] = None,
         template_id: Optional[str] = None,
         base_template_id: Optional[str] = None,
+        allow_partial: Optional[bool] = False,
     ) -> PydanticStepMetrics:
         """Record performance metrics for a step.
 
@@ -434,7 +446,7 @@
             tool_execution_ns: Time spent on tool execution in nanoseconds
             step_ns: Total time for the step in nanoseconds
             agent_id: The ID of the agent
-            job_id: The ID of the job
+            run_id: The ID of the run
             project_id: The ID of the project
             template_id: The ID of the template
             base_template_id: The ID of the base template
@@ -452,11 +464,18 @@
             if step.organization_id != actor.organization_id:
                 raise Exception("Unauthorized")
 
+            if allow_partial:
+                try:
+                    metrics = await StepMetricsModel.read_async(db_session=session, identifier=step_id, actor=actor)
+                    return metrics.to_pydantic()
+                except NoResultFound:
+                    pass
+
             metrics_data = {
                 "id": step_id,
                 "organization_id": actor.organization_id,
                 "agent_id": agent_id or step.agent_id,
-                "job_id": job_id or step.job_id,
+                "run_id": run_id,
                 "project_id": project_id or step.project_id,
                 "llm_request_ns": llm_request_ns,
                 "tool_execution_ns": tool_execution_ns,
@@ -469,62 +488,66 @@
             await metrics.create_async(session)
             return metrics.to_pydantic()
 
-    def _verify_job_access(
+    def _verify_run_access(
         self,
         session: Session,
-        job_id: str,
+        run_id: str,
         actor: PydanticUser,
         access: List[Literal["read", "write", "delete"]] = ["read"],
-    ) -> JobModel:
+    ):
         """
-        Verify that a job exists and the user has the required access.
+        Verify that a run exists and the user has the required access.
 
         Args:
             session: The database session
-            job_id: The ID of the job to verify
+            run_id: The ID of the run to verify
             actor: The user making the request
 
         Returns:
-            The job if it exists and the user has access
+            The run if it exists and the user has access
 
         Raises:
-            NoResultFound: If the job does not exist or user does not have access
+            NoResultFound: If the run does not exist or user does not have access
         """
-        job_query = select(JobModel).where(JobModel.id == job_id)
-        job_query = JobModel.apply_access_predicate(job_query, actor, access, AccessType.USER)
-        job = session.execute(job_query).scalar_one_or_none()
-        if not job:
-            raise NoResultFound(f"Job with id {job_id} does not exist or user does not have access")
-        return job
+        from letta.orm.run import Run as RunModel
+
+        run_query = select(RunModel).where(RunModel.id == run_id)
+        run_query = RunModel.apply_access_predicate(run_query, actor, access, AccessType.USER)
+        run = session.execute(run_query).scalar_one_or_none()
+        if not run:
+            raise NoResultFound(f"Run with id {run_id} does not exist or user does not have access")
+        return run
 
     @staticmethod
-    async def _verify_job_access_async(
+    async def _verify_run_access_async(
         session: AsyncSession,
-        job_id: str,
+        run_id: str,
         actor: PydanticUser,
         access: List[Literal["read", "write", "delete"]] = ["read"],
-    ) -> JobModel:
+    ):
         """
-        Verify that a job exists and the user has the required access asynchronously.
+        Verify that a run exists and the user has the required access asynchronously.
 
         Args:
             session: The async database session
-            job_id: The ID of the job to verify
+            run_id: The ID of the run to verify
             actor: The user making the request
 
         Returns:
-            The job if it exists and the user has access
+            The run if it exists and the user has access
 
         Raises:
-            NoResultFound: If the job does not exist or user does not have access
+            NoResultFound: If the run does not exist or user does not have access
         """
-        job_query = select(JobModel).where(JobModel.id == job_id)
-        job_query = JobModel.apply_access_predicate(job_query, actor, access, AccessType.USER)
-        result = await session.execute(job_query)
-        job = result.scalar_one_or_none()
-        if not job:
-            raise NoResultFound(f"Job with id {job_id} does not exist or user does not have access")
-        return job
+        from letta.orm.run import Run as RunModel
+
+        run_query = select(RunModel).where(RunModel.id == run_id)
+        run_query = RunModel.apply_access_predicate(run_query, actor, access, AccessType.USER)
+        result = await session.execute(run_query)
+        run = result.scalar_one_or_none()
+        if not run:
+            raise NoResultFound(f"Run with id {run_id} does not exist or user does not have access")
+        return run
 
 
 # noinspection PyTypeChecker
@@ -549,7 +572,7 @@ class NoopStepManager(StepManager):
         context_window_limit: int,
         usage: UsageStatistics,
         provider_id: Optional[str] = None,
-        job_id: Optional[str] = None,
+        run_id: Optional[str] = None,
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
         stop_reason: Optional[LettaStopReason] = None,
@@ -572,7 +595,7 @@
         context_window_limit: int,
         usage: UsageStatistics,
         provider_id: Optional[str] = None,
-        job_id: Optional[str] = None,
+        run_id: Optional[str] = None,
        step_id: Optional[str] = None,
        project_id: Optional[str] = None,
        stop_reason: Optional[LettaStopReason] = None,
letta/services/summarizer/summarizer.py
@@ -10,7 +10,7 @@ from letta.llm_api.llm_client import LLMClient
 from letta.log import get_logger
 from letta.otel.tracing import trace_method
 from letta.prompts import gpt_summarize
-from letta.schemas.enums import MessageRole
+from letta.schemas.enums import AgentType, MessageRole
 from letta.schemas.letta_message_content import TextContent
 from letta.schemas.llm_config import LLMConfig
 from letta.schemas.message import Message, MessageCreate
@@ -189,6 +189,7 @@ class Summarizer:
                 # We already packed, don't pack again
                 wrap_user_message=False,
                 wrap_system_message=False,
+                run_id=None,  # TODO: add this
             )[0]
 
             # Create the message in the DB
@@ -383,17 +384,18 @@ async def simple_summary(messages: List[Message], llm_config: LLMConfig, actor:
         {"role": "user", "content": summary_transcript},
     ]
     input_messages_obj = [simple_message_wrapper(msg) for msg in input_messages]
-    request_data = llm_client.build_request_data(input_messages_obj, llm_config, tools=[])
+    # Build a local LLMConfig for v1-style summarization which uses native content and must not
+    # include inner thoughts in kwargs to avoid conflicts in Anthropic formatting
+    summarizer_llm_config = LLMConfig(**llm_config.model_dump())
+    summarizer_llm_config.put_inner_thoughts_in_kwargs = False
 
-    # NOTE: we should disable the inner_thoughts_in_kwargs here, because we don't use it
-    # I'm leaving it commented it out for now for safety but is fine assuming the var here is a copy not a reference
-    # llm_config.put_inner_thoughts_in_kwargs = False
+    request_data = llm_client.build_request_data(AgentType.letta_v1_agent, input_messages_obj, summarizer_llm_config, tools=[])
     try:
-        response_data = await llm_client.request_async(request_data, llm_config)
+        response_data = await llm_client.request_async(request_data, summarizer_llm_config)
     except Exception as e:
         # handle LLM error (likely a context window exceeded error)
         raise llm_client.handle_llm_error(e)
-    response = llm_client.convert_response_to_chat_completion(response_data, input_messages_obj, llm_config)
+    response = llm_client.convert_response_to_chat_completion(response_data, input_messages_obj, summarizer_llm_config)
     if response.choices[0].message.content is None:
         logger.warning("No content returned from summarizer")
         # TODO raise an error error instead?
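The summarizer change copies the config before flipping put_inner_thoughts_in_kwargs, so the caller's LLMConfig is never mutated. A small sketch of that copy-before-mutate pattern (Pydantic v2 semantics; the field values are illustrative, not from the diff):

# Copy-before-mutate sketch: model_dump() + re-construct yields an independent LLMConfig.
from letta.schemas.llm_config import LLMConfig

caller_config = LLMConfig(model="gpt-4o-mini", model_endpoint_type="openai", context_window=128000)  # illustrative values
local_config = LLMConfig(**caller_config.model_dump())   # new object, not a reference
local_config.put_inner_thoughts_in_kwargs = False         # safe: caller_config is unchanged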
letta/services/telemetry_manager.py
@@ -39,22 +39,6 @@ class TelemetryManager:
             await session.commit()
             return pydantic_provider_trace
 
-    @enforce_types
-    @trace_method
-    def create_provider_trace(self, actor: PydanticUser, provider_trace_create: ProviderTraceCreate) -> PydanticProviderTrace:
-        with db_registry.session() as session:
-            provider_trace = ProviderTraceModel(**provider_trace_create.model_dump())
-            provider_trace.organization_id = actor.organization_id
-            if provider_trace_create.request_json:
-                request_json_str = json_dumps(provider_trace_create.request_json)
-                provider_trace.request_json = json_loads(request_json_str)
-
-            if provider_trace_create.response_json:
-                response_json_str = json_dumps(provider_trace_create.response_json)
-                provider_trace.response_json = json_loads(response_json_str)
-            provider_trace.create(session, actor=actor)
-            return provider_trace.to_pydantic()
-
 
 @singleton
 class NoopTelemetryManager(TelemetryManager):
letta/services/tool_executor/builtin_tool_executor.py
@@ -209,6 +209,41 @@ class LettaBuiltinToolExecutor(ToolExecutor):
         from readability import Document
         from trafilatura import extract, fetch_url
 
+        # Try exa first
+        try:
+            from exa_py import Exa
+
+            agent_state_tool_env_vars = agent_state.get_agent_env_vars_as_dict()
+            exa_api_key = agent_state_tool_env_vars.get("EXA_API_KEY") or tool_settings.exa_api_key
+            if exa_api_key:
+                logger.info(f"[DEBUG] Starting Exa fetch content for url: '{url}'")
+                exa = Exa(api_key=exa_api_key)
+
+                results = await asyncio.to_thread(
+                    lambda: exa.get_contents(
+                        [url],
+                        text=True,
+                    ).results
+                )
+
+                if len(results) > 0:
+                    result = results[0]
+                    return json.dumps(
+                        {
+                            "title": result.title,
+                            "published_date": result.published_date,
+                            "author": result.author,
+                            "text": result.text,
+                        }
+                    )
+                else:
+                    logger.info(f"[DEBUG] Exa did not return content for '{url}', falling back to local fetch.")
+            else:
+                logger.info("[DEBUG] No Exa key available, falling back to local fetch.")
+        except ImportError:
+            logger.info("[DEBUG] Exa pip package unavailable, falling back to local fetch.")
+            pass
+
         try:
             # single thread pool call for the entire trafilatura pipeline
             def trafilatura_pipeline():
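For reference, the Exa path added above can be exercised on its own roughly as follows. This is a sketch that assumes an API key is at hand and mirrors the exa_py calls from the diff; the real executor additionally falls back to the trafilatura pipeline when Exa is unavailable or returns nothing.

# Standalone sketch of the Exa content fetch used above; exa_py usage mirrors the diff.
import asyncio
import json

from exa_py import Exa

async def fetch_with_exa(url: str, api_key: str) -> str:
    exa = Exa(api_key=api_key)
    # get_contents() is blocking, so run it in a worker thread as the executor does
    results = await asyncio.to_thread(lambda: exa.get_contents([url], text=True).results)
    if not results:
        raise ValueError("Exa returned no content; caller should fall back to local fetching")
    r = results[0]
    return json.dumps({"title": r.title, "published_date": r.published_date, "author": r.author, "text": r.text})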