openai-sdk-helpers 0.5.0__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,9 +2,11 @@
  
  from __future__ import annotations
  
+ import logging
+ import traceback
+ import uuid
  from pathlib import Path
  from typing import TYPE_CHECKING, Any, Dict, Optional, Protocol, cast
- import uuid
  
  from agents import Agent, Handoff, InputGuardrail, OutputGuardrail, Session
  from agents.model_settings import ModelSettings
@@ -13,7 +15,6 @@ from agents.tool import Tool
  from jinja2 import Template
  
  from ..environment import get_data_path
-
  from ..utils.json.data_class import DataclassJSONSerializable
  from ..structure.base import StructureBase
  from ..tools import (
@@ -183,6 +184,8 @@ class AgentBase(DataclassJSONSerializable):
  Return response tool handler and definition for Responses API use.
  build_response(openai_settings, data_path=None, tool_handlers=None, system_vector_store=None)
  Build a ResponseBase instance based on this agent.
+ save_error(exc)
+ Persist error details to a file named with the agent UUID.
  close()
  Clean up agent resources (can be overridden by subclasses).
  """
@@ -237,8 +240,6 @@ class AgentBase(DataclassJSONSerializable):
  else:
  self._data_path = data_path_obj / class_name
  else:
- from ..environment import get_data_path
-
  self._data_path = get_data_path(self.__class__.__name__)
  
  self._input_structure = configuration.input_structure
@@ -487,13 +488,29 @@ class AgentBase(DataclassJSONSerializable):
  output_structure = self._output_structure
  # Use session from parameter, fall back to configuration session
  session_to_use = session if session is not None else self._session
- return await run_async(
- agent=self.get_agent(),
- input=input,
- context=context,
- output_structure=output_structure,
- session=session_to_use,
- )
+ try:
+ return await run_async(
+ agent=self.get_agent(),
+ input=input,
+ context=context,
+ output_structure=output_structure,
+ session=session_to_use,
+ )
+ except Exception as exc:
+ try:
+ self.save_error(exc)
+ except Exception as save_exc:
+ log(
+ f"Failed to save error details for agent {self.uuid}: {save_exc}",
+ level=logging.ERROR,
+ exc=save_exc,
+ )
+ log(
+ f"Error running agent '{self.name}': {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
+ raise
  
  def run_sync(
  self,
@@ -526,13 +543,29 @@ class AgentBase(DataclassJSONSerializable):
  output_structure = self._output_structure
  # Use session from parameter, fall back to configuration session
  session_to_use = session if session is not None else self._session
- return run_sync(
- agent=self.get_agent(),
- input=input,
- context=context,
- output_structure=output_structure,
- session=session_to_use,
- )
+ try:
+ return run_sync(
+ agent=self.get_agent(),
+ input=input,
+ context=context,
+ output_structure=output_structure,
+ session=session_to_use,
+ )
+ except Exception as exc:
+ try:
+ self.save_error(exc)
+ except Exception as save_exc:
+ log(
+ f"Failed to save error details for agent {self.uuid}: {save_exc}",
+ level=logging.ERROR,
+ exc=save_exc,
+ )
+ log(
+ f"Error running agent '{self.name}': {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
+ raise
  
  def as_tool(self) -> Tool:
  """Return the agent as a callable tool.
@@ -731,5 +764,35 @@ class AgentBase(DataclassJSONSerializable):
  self.to_json_file(filepath=checked)
  log(f"Saved messages to {target}")
  
+ def save_error(self, exc: BaseException) -> Path:
+ """Persist error details to a file named with the agent UUID.
+
+ Parameters
+ ----------
+ exc : BaseException
+ Exception instance to serialize.
+
+ Returns
+ -------
+ Path
+ Path to the error file written to disk.
+
+ Examples
+ --------
+ >>> try:
+ ... agent.run_sync("trigger error")
+ ... except Exception as exc:
+ ... agent.save_error(exc)
+ """
+ error_text = "".join(
+ traceback.format_exception(type(exc), exc, exc.__traceback__)
+ )
+ filename = f"{str(self.uuid).lower()}_error.txt"
+ target = self._data_path / self.name / filename
+ checked = check_filepath(filepath=target)
+ checked.write_text(error_text, encoding="utf-8")
+ log(f"Saved error details to {checked}")
+ return checked
+
  
  __all__ = ["AgentConfigurationProtocol", "AgentBase"]
@@ -374,6 +374,7 @@ class FilesAPIManager:
  log(
  f"Error deleting tracked file {file_id}: {exc}",
  level=logging.WARNING,
+ exc=exc,
  )
  results[file_id] = False
  
@@ -8,6 +8,7 @@ def log(
  level: int = logging.INFO,
  *,
  logger_name: str = "openai_sdk_helpers",
+ exc: BaseException | None = None,
  ) -> None:
  """Log a message using Python's standard logging.
  
@@ -20,6 +21,13 @@
  Default is logging.INFO.
  logger_name : str
  Name of the logger. Default is "openai_sdk_helpers".
+ exc : BaseException or None, optional
+ Exception instance to include with the log record. Default is None.
+
+ Returns
+ -------
+ None
+ Return None after emitting the log entry.
  
  Examples
  --------
@@ -28,7 +36,10 @@
  >>> log("Debug info", level=logging.DEBUG)
  """
  logger = logging.getLogger(logger_name)
- logger.log(level, message)
+ exc_info = None
+ if exc is not None:
+ exc_info = (type(exc), exc, exc.__traceback__)
+ logger.log(level, message, exc_info=exc_info)
  
  
  __all__ = ["log"]
@@ -13,6 +13,7 @@ import inspect
  import json
  import logging
  import threading
+ import traceback
  import uuid
  from pathlib import Path
  from typing import (
@@ -137,6 +138,8 @@ class ResponseBase(Generic[T]):
  Construct a StreamlitAppConfig using this class as the builder.
  save(filepath=None)
  Serialize the message history to a JSON file.
+ save_error(exc)
+ Persist error details to a file named with the response UUID.
  close()
  Clean up remote resources including vector stores.
  
@@ -447,6 +450,9 @@
  When use_vector_store is True, this method automatically creates
  a vector store and adds a file_search tool for document retrieval.
  Images are always base64-encoded regardless of this setting.
+ When multiple content strings are provided, file attachments are
+ included only with the first message to avoid duplicating input
+ files across messages.
  
  Examples
  --------
@@ -470,8 +476,17 @@
  self, all_files, use_vector_store
  )
  
- # Add each content as a separate message with the same attachments
- for raw_content in contents:
+ attachments: list[
+ ResponseInputFileParam
+ | ResponseInputFileContentParam
+ | ResponseInputImageContentParam
+ ] = []
+ attachments.extend(vector_file_refs)
+ attachments.extend(base64_files)
+ attachments.extend(image_contents)
+
+ # Add each content as a separate message.
+ for index, raw_content in enumerate(contents):
  processed_text = raw_content.strip()
  input_content: list[
  ResponseInputTextParam
@@ -480,14 +495,8 @@
  | ResponseInputImageContentParam
  ] = [ResponseInputTextParam(type="input_text", text=processed_text)]
  
- # Add vector store file references
- input_content.extend(vector_file_refs)
-
- # Add base64 files
- input_content.extend(base64_files)
-
- # Add images
- input_content.extend(image_contents)
+ if index == 0:
+ input_content.extend(attachments)
  
  message = cast(
  ResponseInputItemParam,
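
As a sketch of the deduplication change above: when one call carries several content strings, the collected attachments are extended onto the first message only, so later messages are text-only. Whether `run_sync` accepts a list of content strings in exactly this shape is an assumption based on the loop over `contents`; `response` is assumed to be a configured `ResponseBase`:

    # Hypothetical call shape: report.pdf is attached to the first message
    # only, and the second content string becomes a plain text follow-up.
    result = response.run_sync(
        ["Summarize the attached report", "List any open questions it raises"],
        files=["report.pdf"],
    )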
@@ -555,96 +564,117 @@
  log(f"{self.__class__.__name__}::run_response")
  parsed_result: T | None = None
  
- self._build_input(
- content=content,
- files=(ensure_list(files) if files else None),
- use_vector_store=use_vector_store,
- )
-
- kwargs = {
- "input": self.messages.to_openai_payload(),
- "model": self._model,
- }
- if not self._tools and self._output_structure is not None:
- kwargs["text"] = self._output_structure.response_format()
-
- if self._tools:
- kwargs["tools"] = self._tools
- kwargs["tool_choice"] = "auto"
- response = self._client.responses.create(**kwargs)
+ try:
+ self._build_input(
+ content=content,
+ files=(ensure_list(files) if files else None),
+ use_vector_store=use_vector_store,
+ )
  
- if not response.output:
- log("No output returned from OpenAI.", level=logging.ERROR)
- raise RuntimeError("No output returned from OpenAI.")
+ kwargs = {
+ "input": self.messages.to_openai_payload(),
+ "model": self._model,
+ }
+ if not self._tools and self._output_structure is not None:
+ kwargs["text"] = self._output_structure.response_format()
  
- for response_output in response.output:
- if isinstance(response_output, ResponseFunctionToolCall):
- log(
- f"Tool call detected. Executing {response_output.name}.",
- level=logging.INFO,
- )
+ if self._tools:
+ kwargs["tools"] = self._tools
+ kwargs["tool_choice"] = "auto"
+ response = self._client.responses.create(**kwargs)
  
- tool_name = response_output.name
- registration = self._tool_handlers.get(tool_name)
+ if not response.output:
+ log("No output returned from OpenAI.", level=logging.ERROR)
+ raise RuntimeError("No output returned from OpenAI.")
  
- if registration is None:
+ for response_output in response.output:
+ if isinstance(response_output, ResponseFunctionToolCall):
  log(
- f"No handler found for tool '{tool_name}'",
- level=logging.ERROR,
+ f"Tool call detected. Executing {response_output.name}.",
+ level=logging.INFO,
  )
- raise ValueError(f"No handler for tool: {tool_name}")
  
- handler = registration.handler
- tool_spec = registration.tool_spec
+ tool_name = response_output.name
+ registration = self._tool_handlers.get(tool_name)
  
- try:
- if inspect.iscoroutinefunction(handler):
- tool_result_json = await handler(response_output)
- else:
- tool_result_json = handler(response_output)
- if isinstance(tool_result_json, str):
- tool_result = json.loads(tool_result_json)
- tool_output = tool_result_json
+ if registration is None:
+ log(
+ f"No handler found for tool '{tool_name}'",
+ level=logging.ERROR,
+ )
+ raise ValueError(f"No handler for tool: {tool_name}")
+
+ handler = registration.handler
+ tool_spec = registration.tool_spec
+
+ try:
+ if inspect.iscoroutinefunction(handler):
+ tool_result_json = await handler(response_output)
+ else:
+ tool_result_json = handler(response_output)
+ if isinstance(tool_result_json, str):
+ tool_result = json.loads(tool_result_json)
+ tool_output = tool_result_json
+ else:
+ tool_result = coerce_jsonable(tool_result_json)
+ tool_output = json.dumps(tool_result, cls=customJSONEncoder)
+ self.messages.add_tool_message(
+ content=response_output, output=tool_output
+ )
+ self.save()
+ except Exception as exc:
+ log(
+ f"Error executing tool handler '{tool_name}': {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
+ raise RuntimeError(
+ f"Error in tool handler '{tool_name}': {exc}"
+ )
+
+ if tool_spec is not None:
+ output_dict = tool_spec.output_structure.from_json(tool_result)
+ parsed_result = cast(T, output_dict)
+ elif self._output_structure:
+ output_dict = self._output_structure.from_json(tool_result)
+ parsed_result = output_dict
  else:
- tool_result = coerce_jsonable(tool_result_json)
- tool_output = json.dumps(tool_result, cls=customJSONEncoder)
- self.messages.add_tool_message(
- content=response_output, output=tool_output
+ print(tool_result)
+ parsed_result = cast(T, tool_result)
+
+ if isinstance(response_output, ResponseOutputMessage):
+ self.messages.add_assistant_message(
+ response_output, metadata=kwargs
  )
  self.save()
- except Exception as exc:
- log(
- f"Error executing tool handler '{tool_name}': {exc}",
- level=logging.ERROR,
- )
- raise RuntimeError(f"Error in tool handler '{tool_name}': {exc}")
-
- if tool_spec is not None:
- output_dict = tool_spec.output_structure.from_json(tool_result)
- parsed_result = cast(T, output_dict)
- elif self._output_structure:
- output_dict = self._output_structure.from_json(tool_result)
- parsed_result = output_dict
- else:
- print(tool_result)
- parsed_result = cast(T, tool_result)
-
- if isinstance(response_output, ResponseOutputMessage):
- self.messages.add_assistant_message(response_output, metadata=kwargs)
- self.save()
- if hasattr(response, "output_text") and response.output_text:
- raw_text = response.output_text
- log("No tool call. Parsing output_text.")
- try:
- output_dict = json.loads(raw_text)
- if self._output_structure:
- return self._output_structure.from_json(output_dict)
- return output_dict
- except Exception:
- print(raw_text)
- if parsed_result is not None:
- return parsed_result
- return response.output_text
+ if hasattr(response, "output_text") and response.output_text:
+ raw_text = response.output_text
+ log("No tool call. Parsing output_text.")
+ try:
+ output_dict = json.loads(raw_text)
+ if self._output_structure:
+ return self._output_structure.from_json(output_dict)
+ return output_dict
+ except Exception:
+ print(raw_text)
+ if parsed_result is not None:
+ return parsed_result
+ return response.output_text
+ except Exception as exc:
+ try:
+ self.save_error(exc)
+ except Exception as save_exc:
+ log(
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
+ level=logging.ERROR,
+ exc=save_exc,
+ )
+ log(
+ f"Error running response '{self._name}': {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
+ raise
  
  def run_sync(
  self,
@@ -865,6 +895,36 @@
  self.messages.to_json_file(str(checked))
  log(f"Saved messages to {target}")
  
+ def save_error(self, exc: BaseException) -> Path:
+ """Persist error details to a file named with the response UUID.
+
+ Parameters
+ ----------
+ exc : BaseException
+ Exception instance to serialize.
+
+ Returns
+ -------
+ Path
+ Path to the error file written to disk.
+
+ Examples
+ --------
+ >>> try:
+ ... response.run_sync("trigger error")
+ ... except Exception as exc:
+ ... response.save_error(exc)
+ """
+ error_text = "".join(
+ traceback.format_exception(type(exc), exc, exc.__traceback__)
+ )
+ filename = f"{str(self.uuid).lower()}_error.txt"
+ target = self._data_path / self._name / filename
+ checked = check_filepath(filepath=target)
+ checked.write_text(error_text, encoding="utf-8")
+ log(f"Saved error details to {checked}")
+ return checked
+
  def __repr__(self) -> str:
  """Return a detailed string representation of the response session.
  
@@ -936,7 +996,19 @@
  f"Files API cleanup: {successful}/{len(cleanup_results)} files deleted"
  )
  except Exception as exc:
- log(f"Error cleaning up Files API uploads: {exc}", level=logging.WARNING)
+ try:
+ self.save_error(exc)
+ except Exception as save_exc:
+ log(
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
+ level=logging.ERROR,
+ exc=save_exc,
+ )
+ log(
+ f"Error cleaning up Files API uploads: {exc}",
+ level=logging.WARNING,
+ exc=exc,
+ )
  
  # Always clean user vector storage if it exists
  try:
@@ -944,6 +1016,18 @@
  self._user_vector_storage.delete()
  log("User vector store deleted.")
  except Exception as exc:
- log(f"Error deleting user vector store: {exc}", level=logging.WARNING)
+ try:
+ self.save_error(exc)
+ except Exception as save_exc:
+ log(
+ f"Failed to save error details for response {self.uuid}: {save_exc}",
+ level=logging.ERROR,
+ exc=save_exc,
+ )
+ log(
+ f"Error deleting user vector store: {exc}",
+ level=logging.WARNING,
+ exc=exc,
+ )
  # System vector store cleanup is now handled via tool configuration
  log(f"Session {self.uuid} closed.")
@@ -66,6 +66,12 @@ def process_files(
  2. Base64-encoded file content (ResponseInputFileContentParam)
  3. Base64-encoded image content (ResponseInputImageContentParam)
  
+ Notes
+ -----
+ Inline ``input_file`` attachments only support PDF documents. For other
+ document formats, use ``use_vector_store=True`` or convert to PDF before
+ calling this helper.
+
  Examples
  --------
  >>> from openai_sdk_helpers.response import process_files
@@ -93,6 +99,9 @@
  else:
  document_files.append(file_path)
  
+ if document_files and not use_vector_store:
+ _validate_inline_document_files(document_files)
+
  # Handle document files (vector store or base64)
  vector_file_refs: list[ResponseInputFileParam] = []
  base64_files: list[ResponseInputFileContentParam] = []
@@ -113,6 +122,34 @@
  return vector_file_refs, base64_files, image_contents
  
  
+ def _validate_inline_document_files(document_files: list[str]) -> None:
+ """Validate document files for inline ``input_file`` usage.
+
+ Parameters
+ ----------
+ document_files : list[str]
+ Document file paths that will be sent as inline ``input_file``
+ attachments.
+
+ Raises
+ ------
+ ValueError
+ If any document file is not a PDF.
+ """
+ unsupported_files = [
+ file_path
+ for file_path in document_files
+ if Path(file_path).suffix.lower() != ".pdf"
+ ]
+ if unsupported_files:
+ filenames = ", ".join(Path(path).name for path in unsupported_files)
+ raise ValueError(
+ "Inline input_file attachments support PDFs only. "
+ f"Unsupported files: {filenames}. "
+ "Convert to PDF or set use_vector_store=True."
+ )
+
+
  def _upload_to_vector_store(
  response: ResponseBase[Any], document_files: list[str]
  ) -> list[ResponseInputFileParam]:
@@ -283,7 +320,7 @@
  base64_files.append(result)
  except Exception as exc:
  file_path = future_to_file[future]
- log(f"Error encoding document {file_path}: {exc}")
+ log(f"Error encoding document {file_path}: {exc}", exc=exc)
  raise
  
  return base64_files
@@ -385,7 +422,7 @@
  image_contents.append(result)
  except Exception as exc:
  image_path = future_to_file[future]
- log(f"Error encoding image {image_path}: {exc}")
+ log(f"Error encoding image {image_path}: {exc}", exc=exc)
  raise
  
  return image_contents
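
A hedged caller-side sketch of the new validation: a non-PDF document passed without a vector store now raises `ValueError`, and the documented remedies are `use_vector_store=True` or converting the file to PDF first. The retry shape below assumes `run_sync` forwards the `use_vector_store` keyword, which this diff does not show directly; `response` is assumed to be a configured `ResponseBase`:

    try:
        result = response.run_sync("Summarize these notes", files=["notes.docx"])
    except ValueError:
        # Inline input_file attachments are PDFs only; fall back to a vector store.
        result = response.run_sync(
            "Summarize these notes",
            files=["notes.docx"],
            use_vector_store=True,
        )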
@@ -32,20 +32,12 @@ from openai_sdk_helpers.utils import (
  
  # Supported file extensions for OpenAI Assistants file search and vision
  SUPPORTED_FILE_EXTENSIONS = (
- ".csv",
- ".docx",
  ".gif",
- ".html",
- ".json",
  ".jpeg",
  ".jpg",
- ".md",
  ".pdf",
  ".png",
- ".pptx",
- ".txt",
  ".webp",
- ".xlsx",
  )
  
  
@@ -73,10 +73,11 @@ def _delete_all_vector_stores() -> None:
  log(
  f"Failed to delete orphaned file {file.id}: {exc}",
  level=logging.WARNING,
+ exc=exc,
  )
  
  except Exception as exc:
- log(f"Error during cleanup: {exc}", level=logging.ERROR)
+ log(f"Error during cleanup: {exc}", level=logging.ERROR, exc=exc)
  
  
  def _delete_all_files() -> None:
@@ -100,4 +101,8 @@ def _delete_all_files() -> None:
  log(f"Deleting file {file.id}")
  client.files.delete(file_id=file.id)
  except Exception as exc:
- log(f"Failed to delete file {file.id}: {exc}", level=logging.WARNING)
+ log(
+ f"Failed to delete file {file.id}: {exc}",
+ level=logging.WARNING,
+ exc=exc,
+ )
@@ -208,7 +208,11 @@ class VectorStorage:
  self._existing_files[file_name] = f.id
  
  except Exception as exc:
- log(f"Failed to load existing files: {exc}", level=logging.ERROR)
+ log(
+ f"Failed to load existing files: {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  self._existing_files = {}
  return self._existing_files
  
@@ -231,7 +235,11 @@
  result[file_name] = f.id
  return result
  except Exception as exc:
- log(f"Failed to load existing files: {exc}", level=logging.ERROR)
+ log(
+ f"Failed to load existing files: {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  return {}
  
  def upload_file(
@@ -328,7 +336,11 @@
  
  return VectorStorageFileInfo(name=file_name, id=file.id, status="success")
  except Exception as exc:
- log(f"Error uploading {file_name}: {str(exc)}", level=logging.ERROR)
+ log(
+ f"Error uploading {file_name}: {str(exc)}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  return VectorStorageFileInfo(
  name=file_name, id="", status="error", error=str(exc)
  )
@@ -447,6 +459,7 @@
  log(
  f"Warning: Could not delete file {file_id} from Files API: {file_delete_exc}",
  level=logging.WARNING,
+ exc=file_delete_exc,
  )
  
  to_remove = [k for k, v in self.existing_files.items() if v == file_id]
@@ -457,7 +470,11 @@
  name=to_remove[0] if to_remove else "", id=file_id, status="success"
  )
  except Exception as exc:
- log(f"Error deleting file {file_id}: {str(exc)}", level=logging.ERROR)
+ log(
+ f"Error deleting file {file_id}: {str(exc)}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  return VectorStorageFileInfo(
  name="", id=file_id, status="failed", error=str(exc)
  )
@@ -524,6 +541,7 @@
  log(
  f"Error deleting vector store '{self._vector_storage.name}': {str(exc)}",
  level=logging.ERROR,
+ exc=exc,
  )
  
  def download_files(self, output_dir: str) -> VectorStorageFileStats:
@@ -551,7 +569,11 @@
  )
  store_files = list(getattr(files, "data", files))
  except Exception as exc:
- log(f"Failed to list files for download: {exc}", level=logging.ERROR)
+ log(
+ f"Failed to list files for download: {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  return VectorStorageFileStats(
  total=0,
  fail=1,
@@ -582,7 +604,11 @@
  handle.write(data)
  stats.success += 1
  except Exception as exc:
- log(f"Failed to download {file_id}: {exc}", level=logging.ERROR)
+ log(
+ f"Failed to download {file_id}: {exc}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  stats.fail += 1
  stats.errors.append(
  VectorStorageFileInfo(
@@ -621,7 +647,11 @@
  )
  return response
  except Exception as exc:
- log(f"Error searching vector store: {str(exc)}", level=logging.ERROR)
+ log(
+ f"Error searching vector store: {str(exc)}",
+ level=logging.ERROR,
+ exc=exc,
+ )
  return None
  
  def summarize(self, query: str, *, top_k: int = 15) -> str | None:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-sdk-helpers
- Version: 0.5.0
+ Version: 0.5.2
  Summary: Composable helpers for OpenAI SDK agents, prompts, and storage
  Author: openai-sdk-helpers maintainers
  License: MIT
@@ -330,7 +330,7 @@ with ResponseBase(
  ) as response:
  # Automatic type detection - single files parameter
  # Images are sent as base64-encoded images
- # Documents are sent as base64-encoded file data
+ # PDF documents are sent as base64-encoded file data
  result = response.run_sync(
  "Analyze these files",
  files=["photo.jpg", "document.pdf"]
@@ -356,7 +356,8 @@
  **How It Works:**
  
  - **Images** (jpg, png, gif, etc.) are automatically sent as base64-encoded images
- - **Documents** (pdf, txt, xlsx, etc.) are sent as base64-encoded file data by default
+ - **Documents** are sent as base64-encoded file data by default for PDFs only
+ - **Non-PDF documents** should use `use_vector_store=True` (or be converted to PDF)
  - **Vector Stores** can optionally be used for documents when `use_vector_store=True`
  - **Batch Processing** is automatically used for multiple files (>3) for efficient encoding
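
A short usage sketch of the updated behavior, following the README example shown in this diff; the surrounding `ResponseBase` configuration is elided and assumed:

    # Mixed inline attachments, matching the README example above: the image
    # is sent base64-encoded as an image, the PDF as base64 file data.
    result = response.run_sync(
        "Analyze these files",
        files=["photo.jpg", "document.pdf"],
    )

    # Non-PDF documents should instead go through a vector store
    # (use_vector_store=True) or be converted to PDF before the call.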
 
@@ -2,14 +2,14 @@ openai_sdk_helpers/__init__.py,sha256=8I469KuzrbAjhNX2A5UnYt_kSmjXqQbfHectTeUx7T
  openai_sdk_helpers/cli.py,sha256=BDc08NqWVfL4GBekxMfN5IPPB4pmN1Od9sVpKtIJRZk,8025
  openai_sdk_helpers/environment.py,sha256=mNoswzIdv37tTRhFwA2B6_Onxsm7vhfjPArfwhYuL7g,1825
  openai_sdk_helpers/errors.py,sha256=ZclLp94o08fSsFNjFn_yrX9yTjw1RE0v7A5T1hBChUc,2925
- openai_sdk_helpers/files_api.py,sha256=uMKHvGg1Od0J95Izl3AG9ofQYq8EDJXEty7zP0oKjJM,12569
- openai_sdk_helpers/logging.py,sha256=JcR0FTWht1tYdwD-bXH835pr0JV0RwHfY3poruiZGHM,795
+ openai_sdk_helpers/files_api.py,sha256=Sg-k4YDsrzggvICYA7h4Ua6_vGhMpZmAeS5JtQVE2hU,12598
+ openai_sdk_helpers/logging.py,sha256=djtMo_R_88JjxJeUGU_hSlYCTRv3ffoSu1ocOKrUBIw,1153
  openai_sdk_helpers/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  openai_sdk_helpers/settings.py,sha256=xK_u0YNKgtPrLrZrVr4F4k0CvSuYbsmkqqw9mCMdyF8,10932
  openai_sdk_helpers/tools.py,sha256=8hhcytpmDfoXV16UQbDmDVV0rhLOn8c_VjXO8XaTFLQ,19000
  openai_sdk_helpers/types.py,sha256=ejCG0rYqJhjOQvKLoNnzq-TzcKCFt69GVfi7y805NkU,1451
  openai_sdk_helpers/agent/__init__.py,sha256=SRz8WupyS89c7sYML2hkwZLkIu6zczyI9MVyDWAHK1w,1040
- openai_sdk_helpers/agent/base.py,sha256=y2LEkg4AVqNZQ_OPAn5yTy4x8lREq8tJPaUwAYTNHQY,24065
+ openai_sdk_helpers/agent/base.py,sha256=8LIwi7zuYcOsXBjpsNdFTdpY8Ih-iAYXkqzLn4wkd1w,26144
  openai_sdk_helpers/agent/configuration.py,sha256=FU3xnb8-8qoezLW47WwxZg7z2AxNXRW1Svl0FMsk8kc,14244
  openai_sdk_helpers/agent/coordinator.py,sha256=lVjA0yI-GhGKlqbNR_k9GOCrUjFoZ0QoqRaafHckyME,18052
  openai_sdk_helpers/agent/runner.py,sha256=l2NPS9VA9d4RISuBfanFfKxXNYSHQ7MTjRsuzx4APls,3473
@@ -40,9 +40,9 @@ openai_sdk_helpers/prompt/vector_planner.jinja,sha256=szzuJu6ZawYWuARgQn4DykBLig
  openai_sdk_helpers/prompt/vector_search.jinja,sha256=KPEYQDRKsUesadSyQcBBiqYQEDL1NLN6BQsqw-GcKMA,249
  openai_sdk_helpers/prompt/vector_writer.jinja,sha256=q5osfexGvt1xn8ZPtBWUP36n_1HK_Ziu8dkmCZDVamc,342
  openai_sdk_helpers/response/__init__.py,sha256=YFrGpnMIfatnLWXAZgZDMvDx7Yjsqjat8W9INxKuPxY,1728
- openai_sdk_helpers/response/base.py,sha256=pZYc2YpIzg9fYP98XqVN_sTDGRpgqd-UvrBZLfUw0KQ,34791
+ openai_sdk_helpers/response/base.py,sha256=jywi1EqMFSKVzAmrULn1NNqN11OYRtN8RjY3ezw4IuQ,37849
  openai_sdk_helpers/response/configuration.py,sha256=jxneKd7oj08D40ceOWETB3TeUHd7Cnz-ooQp0akI9fA,10465
- openai_sdk_helpers/response/files.py,sha256=zv5MpYZYHDBum4q9RJH_mVdnKHKOB72EB4nZwTwnlbU,13295
+ openai_sdk_helpers/response/files.py,sha256=O--boEPdFGsf9pHXPuNtG0aVJG2ZzwR4L1CZDW0hBP4,14450
  openai_sdk_helpers/response/messages.py,sha256=qX3sW79rLuJEys28zyv5MovZikwGOaLevzdVN0VYMRE,10104
  openai_sdk_helpers/response/planner.py,sha256=AuNMZkd4TGnvybSJaf2iMbvfPINbusrWucWBk2uQN_g,340
  openai_sdk_helpers/response/prompter.py,sha256=TLRLmNwPcxaaB_X76BbvwXlphducPKYVbn-afTqN2Rk,344
@@ -50,7 +50,7 @@ openai_sdk_helpers/response/runner.py,sha256=3VmWY5du5iBwjVU9D0n2hexu61f561m2iTv
  openai_sdk_helpers/response/tool_call.py,sha256=Y0ub14WJyuVyj9gRdnHFH2e-ezkfhJ9nnMlkubMKdug,2938
  openai_sdk_helpers/response/vector_store.py,sha256=HClp6O_g20uklQTY7trC4age3rtDmrt3tuvrl93xIf4,3222
  openai_sdk_helpers/streamlit_app/__init__.py,sha256=3yAkl6qV71cqtT5YFZuC9Bkqit0NtffDV6jmMWpT1k4,812
- openai_sdk_helpers/streamlit_app/app.py,sha256=YbCMOOjrC4rxqQP09WcFnHnfpIYYXn-9Tn-NBRGjFJI,17502
+ openai_sdk_helpers/streamlit_app/app.py,sha256=kkjtdCKVwrJ9nZWuBArm3dhvcjMESX0TMqAiF61_JLM,17402
  openai_sdk_helpers/streamlit_app/configuration.py,sha256=0KeJ4HqCNFthBHsedV6ptqHluAcTPBb5_TujFOGkIUU,16685
  openai_sdk_helpers/structure/__init__.py,sha256=JRdhEFPLDwty8tuAzCmep59RSknorZ4Dd7cyqonOi4Q,3891
  openai_sdk_helpers/structure/agent_blueprint.py,sha256=VyJWkgPNzAYKRDMeR1M4kE6qqQURnwqtrrEn0TRJf0g,9698
@@ -85,11 +85,11 @@ openai_sdk_helpers/utils/json/data_class.py,sha256=Bobc5yCZPMh093-0fiWaYNcEH7gUX
  openai_sdk_helpers/utils/json/ref.py,sha256=FqBIRWIw33Up3rFyTlLYljcuUjg43f6Nu5wX3tOXn54,2809
  openai_sdk_helpers/utils/json/utils.py,sha256=iyc25tnObqXQJWPKLZMVts932GArdKer59KuC8aQKsY,5948
  openai_sdk_helpers/vector_storage/__init__.py,sha256=L5LxO09puh9_yBB9IDTvc1CvVkARVkHqYY1KX3inB4c,975
- openai_sdk_helpers/vector_storage/cleanup.py,sha256=ImWIE-9lli-odD8qIARvmeaa0y8ZD4pYYP-kT0O3178,3552
- openai_sdk_helpers/vector_storage/storage.py,sha256=CcpATTNdeppmMfbQLnhv29hdEfE3wtAVsyKHNCrL-cI,23657
+ openai_sdk_helpers/vector_storage/cleanup.py,sha256=sZ4ZSTlnjF52o9Cc8A9dTX37ZYXXDxS_fdIpoOBWvrg,3666
+ openai_sdk_helpers/vector_storage/storage.py,sha256=t_ukacaXRa9EXE4-3BxsrB4Rjhu6nTu7NA9IjCJBIpQ,24259
  openai_sdk_helpers/vector_storage/types.py,sha256=jTCcOYMeOpZWvcse0z4T3MVs-RBOPC-fqWTBeQrgafU,1639
- openai_sdk_helpers-0.5.0.dist-info/METADATA,sha256=DoveMw25xYYVHQpPcj8FZjz4oEDvwPPbMALwVeU-fwI,24106
- openai_sdk_helpers-0.5.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- openai_sdk_helpers-0.5.0.dist-info/entry_points.txt,sha256=gEOD1ZeXe8d2OP-KzUlG-b_9D9yUZTCt-GFW3EDbIIY,63
- openai_sdk_helpers-0.5.0.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
- openai_sdk_helpers-0.5.0.dist-info/RECORD,,
+ openai_sdk_helpers-0.5.2.dist-info/METADATA,sha256=xpFtABEk1-jSCL63qbJ4LRzrje5jHfHXwcLSrwuQ-6g,24185
+ openai_sdk_helpers-0.5.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ openai_sdk_helpers-0.5.2.dist-info/entry_points.txt,sha256=gEOD1ZeXe8d2OP-KzUlG-b_9D9yUZTCt-GFW3EDbIIY,63
+ openai_sdk_helpers-0.5.2.dist-info/licenses/LICENSE,sha256=CUhc1NrE50bs45tcXF7OcTQBKEvkUuLqeOHgrWQ5jaA,1067
+ openai_sdk_helpers-0.5.2.dist-info/RECORD,,