hivetrace 1.3.7.tar.gz → 1.3.9.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {hivetrace-1.3.7 → hivetrace-1.3.9}/PKG-INFO +80 -5
  2. {hivetrace-1.3.7 → hivetrace-1.3.9}/README.md +79 -4
  3. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/async_client.py +66 -4
  4. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/base.py +53 -1
  5. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/sync_client.py +68 -3
  6. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/models/responses.py +4 -0
  7. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/PKG-INFO +80 -5
  8. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/requires.txt +3 -0
  9. {hivetrace-1.3.7 → hivetrace-1.3.9}/setup.py +2 -1
  10. {hivetrace-1.3.7 → hivetrace-1.3.9}/LICENSE +0 -0
  11. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/__init__.py +0 -0
  12. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/__init__.py +0 -0
  13. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/base_adapter.py +0 -0
  14. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/__init__.py +0 -0
  15. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/adapter.py +0 -0
  16. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/decorators.py +0 -0
  17. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/monitored_agent.py +0 -0
  18. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/monitored_crew.py +0 -0
  19. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/crewai/tool_wrapper.py +0 -0
  20. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/__init__.py +0 -0
  21. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/adapter.py +0 -0
  22. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/api.py +0 -0
  23. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/behavior_tracker.py +0 -0
  24. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/callback.py +0 -0
  25. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/decorators.py +0 -0
  26. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/langchain/models.py +0 -0
  27. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/openai_agents/__init__.py +0 -0
  28. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/openai_agents/adapter.py +0 -0
  29. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/openai_agents/models.py +0 -0
  30. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/openai_agents/tracing.py +0 -0
  31. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/utils/__init__.py +0 -0
  32. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/adapters/utils/logging.py +0 -0
  33. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/__init__.py +0 -0
  34. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/errors/__init__.py +0 -0
  35. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/errors/api.py +0 -0
  36. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/errors/base.py +0 -0
  37. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/errors/network.py +0 -0
  38. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/errors/validation.py +0 -0
  39. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/handlers/__init__.py +0 -0
  40. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/handlers/error_handler.py +0 -0
  41. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/handlers/response_builder.py +0 -0
  42. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/models/__init__.py +0 -0
  43. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/models/requests.py +0 -0
  44. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/utils/__init__.py +0 -0
  45. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/utils/error_helpers.py +0 -0
  46. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/utils/uuid_generator.py +0 -0
  47. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/SOURCES.txt +0 -0
  48. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/dependency_links.txt +0 -0
  49. {hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/top_level.txt +0 -0
  50. {hivetrace-1.3.7 → hivetrace-1.3.9}/setup.cfg +0 -0
{hivetrace-1.3.7 → hivetrace-1.3.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hivetrace
-Version: 1.3.7
+Version: 1.3.9
 Summary: Hivetrace SDK for monitoring LLM applications
 Home-page: http://hivetrace.ai
 Author: Raft
@@ -60,6 +60,16 @@ response = client.input(
     application_id="your-application-id", # Obtained after registering the application in the UI
     message="User prompt here",
 )
+
+# Optionally attach files (filename, bytes, mime_type)
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = client.input(
+    application_id="your-application-id",
+    message="User prompt with files",
+    files=files,
+)
 ```

 ### Send an LLM response (output)
@@ -69,6 +79,16 @@ response = client.output(
     application_id="your-application-id",
     message="LLM response here",
 )
+
+# With files
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = client.output(
+    application_id="your-application-id",
+    message="LLM response with files",
+    files=files,
+)
 ```

 ---
@@ -89,6 +109,16 @@ response = await client.input(
     application_id="your-application-id",
     message="User prompt here",
 )
+
+# With files (filename, bytes, mime_type)
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = await client.input(
+    application_id="your-application-id",
+    message="User prompt with files",
+    files=files,
+)
 ```

 ### Send an LLM response (output)
@@ -98,6 +128,16 @@ response = await client.output(
     application_id="your-application-id",
     message="LLM response here",
 )
+
+# With files
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = await client.output(
+    application_id="your-application-id",
+    message="LLM response with files",
+    files=files,
+)
 ```

 ---
@@ -130,10 +170,20 @@ response = client.input(

 ```python
 # Sync
-def input(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+def input(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...

 # Async
-async def input(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+async def input(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...
 ```

 Sends a **user prompt** to Hivetrace.
@@ -141,11 +191,15 @@ Sends a **user prompt** to Hivetrace.
 * `application_id` — Application identifier (must be a valid UUID, created in the UI)
 * `message` — The user prompt
 * `additional_parameters` — Optional dictionary with extra context (session, user, agents, etc.)
+* `files` — Optional list of tuples `(filename: str, content: bytes, mime_type: str)`; files are attached to the created analysis record
+
+Response contains a `blocked` flag that indicates role restrictions.

 **Response example:**

 ```json
 {
+  "blocked": false,
   "status": "processed",
   "monitoring_result": {
     "is_toxic": false,
@@ -163,10 +217,20 @@ Sends a **user prompt** to Hivetrace.

 ```python
 # Sync
-def output(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+def output(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...

 # Async
-async def output(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+async def output(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...
 ```

 Sends an **LLM response** to Hivetrace.
@@ -174,11 +238,17 @@ Sends an **LLM response** to Hivetrace.
 * `application_id` — Application identifier (must be a valid UUID, created in the UI)
 * `message` — The LLM response
 * `additional_parameters` — Optional dictionary with extra context (session, user, agents, etc.)
+* `files` — Optional list of tuples `(filename: str, content: bytes, mime_type: str)`
+
+> Files are uploaded after the main request completes and an analysis ID is available.
+
+Response contains a `blocked` flag that indicates role restrictions.

 **Response example:**

 ```json
 {
+  "blocked": false,
   "status": "processed",
   "monitoring_result": {
     "is_toxic": false,
@@ -909,3 +979,8 @@ def calculate_sum(a: int, b: int) -> int:
 Add this tool to your agent’s `tools=[...]` — and its calls will appear in HiveTrace with inputs/outputs.

 ---
+
+License
+========
+
+This project is licensed under Apache License 2.0.
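
For reference, the snippet below pulls the documented pieces together in one place. It is a minimal sketch, not taken from the README: it assumes `client` is an already-initialized synchronous SDK instance as in the earlier examples, the application ID is a placeholder, and the attachment is read into bytes to match the documented `(filename, bytes, mime_type)` tuple type.

```python
# Minimal sketch based on the signatures above; `client` is assumed to be an
# already-initialized sync Hivetrace client, and the UUID below is a placeholder.
with open("doc1.txt", "rb") as fh:
    files = [("doc1.txt", fh.read(), "text/plain")]  # (filename, bytes, mime_type)

response = client.input(
    application_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
    message="User prompt with files",
    files=files,
)

# New in 1.3.9: per the README the call returns a dict that carries a `blocked`
# flag; None means the blocking status could not be determined.
if response.get("blocked"):
    print("Prompt was blocked by policy")
```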
{hivetrace-1.3.7 → hivetrace-1.3.9}/README.md

@@ -42,6 +42,16 @@ response = client.input(
     application_id="your-application-id", # Obtained after registering the application in the UI
     message="User prompt here",
 )
+
+# Optionally attach files (filename, bytes, mime_type)
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = client.input(
+    application_id="your-application-id",
+    message="User prompt with files",
+    files=files,
+)
 ```

 ### Send an LLM response (output)
@@ -51,6 +61,16 @@ response = client.output(
     application_id="your-application-id",
     message="LLM response here",
 )
+
+# With files
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = client.output(
+    application_id="your-application-id",
+    message="LLM response with files",
+    files=files,
+)
 ```

 ---
@@ -71,6 +91,16 @@ response = await client.input(
     application_id="your-application-id",
     message="User prompt here",
 )
+
+# With files (filename, bytes, mime_type)
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = await client.input(
+    application_id="your-application-id",
+    message="User prompt with files",
+    files=files,
+)
 ```

 ### Send an LLM response (output)
@@ -80,6 +110,16 @@ response = await client.output(
     application_id="your-application-id",
     message="LLM response here",
 )
+
+# With files
+files = [
+    ("doc1.txt", open("doc1.txt", "rb"), "text/plain"),
+]
+response_with_files = await client.output(
+    application_id="your-application-id",
+    message="LLM response with files",
+    files=files,
+)
 ```

 ---
@@ -112,10 +152,20 @@ response = client.input(

 ```python
 # Sync
-def input(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+def input(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...

 # Async
-async def input(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+async def input(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...
 ```

 Sends a **user prompt** to Hivetrace.
@@ -123,11 +173,15 @@ Sends a **user prompt** to Hivetrace.
 * `application_id` — Application identifier (must be a valid UUID, created in the UI)
 * `message` — The user prompt
 * `additional_parameters` — Optional dictionary with extra context (session, user, agents, etc.)
+* `files` — Optional list of tuples `(filename: str, content: bytes, mime_type: str)`; files are attached to the created analysis record
+
+Response contains a `blocked` flag that indicates role restrictions.

 **Response example:**

 ```json
 {
+  "blocked": false,
   "status": "processed",
   "monitoring_result": {
     "is_toxic": false,
@@ -145,10 +199,20 @@ Sends a **user prompt** to Hivetrace.

 ```python
 # Sync
-def output(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+def output(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...

 # Async
-async def output(application_id: str, message: str, additional_parameters: dict | None = None) -> dict: ...
+async def output(
+    application_id: str,
+    message: str,
+    additional_parameters: dict | None = None,
+    files: list[tuple[str, bytes, str]] | None = None,
+) -> dict: ...
 ```

 Sends an **LLM response** to Hivetrace.
@@ -156,11 +220,17 @@ Sends an **LLM response** to Hivetrace.
 * `application_id` — Application identifier (must be a valid UUID, created in the UI)
 * `message` — The LLM response
 * `additional_parameters` — Optional dictionary with extra context (session, user, agents, etc.)
+* `files` — Optional list of tuples `(filename: str, content: bytes, mime_type: str)`
+
+> Files are uploaded after the main request completes and an analysis ID is available.
+
+Response contains a `blocked` flag that indicates role restrictions.

 **Response example:**

 ```json
 {
+  "blocked": false,
   "status": "processed",
   "monitoring_result": {
     "is_toxic": false,
@@ -891,3 +961,8 @@ def calculate_sum(a: int, b: int) -> int:
 Add this tool to your agent’s `tools=[...]` — and its calls will appear in HiveTrace with inputs/outputs.

 ---
+
+License
+========
+
+This project is licensed under Apache License 2.0.
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/async_client.py

@@ -1,5 +1,5 @@
 import warnings
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Tuple, Union

 import httpx

@@ -22,7 +22,6 @@ class AsyncHivetraceSDK(BaseHivetraceSDK):
     def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
         super().__init__(config)
         self.session = httpx.AsyncClient()
-        # The SDK is async; set the flag for adapters
         self.async_mode = True

     async def __aenter__(self):
@@ -55,27 +54,90 @@ class AsyncHivetraceSDK(BaseHivetraceSDK):
         except Exception as e:
             return ErrorHandler.handle_unexpected_error(e)

+    async def _send_files(
+        self, endpoint: str, files: List[Tuple[str, bytes, str]]
+    ) -> HivetraceResponse:
+        request_args = self._build_files_request_args(endpoint, files)
+        try:
+            response = await self.session.post(**request_args)
+            response.raise_for_status()
+            api_data = response.json()
+            return ResponseBuilder.build_response_from_api(api_data)
+        except httpx.HTTPStatusError as e:
+            return ErrorHandler.handle_http_error(e)
+        except httpx.ConnectError as e:
+            return ErrorHandler.handle_connection_error(e)
+        except httpx.TimeoutException as e:
+            return ErrorHandler.handle_timeout_error(e)
+        except httpx.RequestError as e:
+            return ErrorHandler.handle_request_error(e)
+        except ValueError as e:
+            return ErrorHandler.handle_json_decode_error(e)
+        except Exception as e:
+            return ErrorHandler.handle_unexpected_error(e)
+
+    async def _get_blocking_status(self, endpoint: str) -> Optional[bool]:
+        url = f"{self.hivetrace_url}/{endpoint.lstrip('/')}"
+        headers = {"Authorization": f"Bearer {self.hivetrace_access_token}"}
+        try:
+            response = await self.session.get(
+                url, headers=headers, timeout=self._DEFAULT_TIMEOUT
+            )
+            response.raise_for_status()
+            data = response.json()
+            return data.get("blocked")
+        except Exception:
+            return None
+
     async def input(
         self,
         application_id: str,
         message: str,
         additional_parameters: Optional[Dict[str, Any]] = None,
+        files: Optional[List[Tuple[str, bytes, str]]] = None,
     ) -> HivetraceResponse:
         payload = self._build_message_payload(
             application_id, message, additional_parameters
         )
-        return await self._send_request("/process_request/", payload)
+        process_response = await self._send_request("/process_request/", payload)
+        if files:
+            analysis_id = self._extract_analysis_id(process_response)
+            if analysis_id:
+                await self._send_files(
+                    f"/user_prompt_analysis/{analysis_id}/attach_files", files
+                )
+        analysis_id = self._extract_analysis_id(process_response)
+        if analysis_id:
+            blocked = await self._get_blocking_status(
+                f"/user_prompt_analysis/{analysis_id}/check_blocking"
+            )
+            self._set_blocked(process_response, blocked)
+        return process_response

     async def output(
         self,
         application_id: str,
         message: str,
         additional_parameters: Optional[Dict[str, Any]] = None,
+        files: Optional[List[Tuple[str, bytes, str]]] = None,
     ) -> HivetraceResponse:
         payload = self._build_message_payload(
             application_id, message, additional_parameters
         )
-        return await self._send_request("/process_response/", payload)
+        process_response = await self._send_request("/process_response/", payload)
+        if files:
+            analysis_id = self._extract_analysis_id(process_response)
+            if analysis_id:
+                await self._send_files(
+                    f"/llm_response_analysis/{analysis_id}/attach_files", files
+                )
+        analysis_id = self._extract_analysis_id(process_response)
+        if analysis_id:
+            blocked = await self._get_blocking_status(
+                f"/llm_response_analysis/{analysis_id}/check_blocking"
+            )
+            self._set_blocked(process_response, blocked)
+        return process_response

     async def function_call(
         self,
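
The async client now follows the same attach-then-check flow as the sync one. A sketch of end-to-end usage; the top-level import path and environment-based configuration are assumptions (the diff only shows the client module itself):

```python
# Sketch only: the `from hivetrace import AsyncHivetraceSDK` path and the
# environment-based configuration are assumptions, not confirmed by this diff.
import asyncio
from hivetrace import AsyncHivetraceSDK

async def main() -> None:
    async with AsyncHivetraceSDK() as client:
        with open("doc1.txt", "rb") as fh:
            files = [("doc1.txt", fh.read(), "text/plain")]
        response = await client.input(
            application_id="00000000-0000-0000-0000-000000000000",  # placeholder
            message="User prompt with files",
            files=files,
        )
        # input() now attaches the files to the created analysis record and then
        # queries the check_blocking endpoint, so the response carries `blocked`.
        print(response)

asyncio.run(main())
```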
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/base.py

@@ -1,7 +1,7 @@
 import os
 import uuid
 from abc import ABC, abstractmethod
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Tuple, Union

 import httpx
 from pydantic import ValidationError
@@ -94,6 +94,58 @@ class BaseHivetraceSDK(ABC):
             "timeout": self._DEFAULT_TIMEOUT,
         }

+    def _build_files_request_args(
+        self,
+        endpoint: str,
+        files: List[Tuple[str, bytes, str]],
+        files_field_name: str = "attached_files",
+    ) -> Dict[str, Any]:
+        """Builds request args for multipart file upload."""
+        url = f"{self.hivetrace_url}/{endpoint.lstrip('/')}"
+        headers = {"Authorization": f"Bearer {self.hivetrace_access_token}"}
+        return {
+            "url": url,
+            "files": self._prepare_files_param(files, files_field_name),
+            "headers": headers,
+            "timeout": self._DEFAULT_TIMEOUT,
+        }
+
+    @staticmethod
+    def _prepare_files_param(
+        files: List[Tuple[str, bytes, str]],
+        files_field_name: str = "attached_files",
+    ) -> List[Tuple[str, Tuple[str, bytes, str]]]:
+        files_param: List[Tuple[str, Tuple[str, bytes, str]]] = []
+        for file_tuple in files:
+            files_param.append((files_field_name, file_tuple))
+        return files_param
+
+    @staticmethod
+    def _extract_analysis_id(response: Any) -> Optional[str]:
+        """Extracts analysis id from API response if present."""
+        try:
+            if isinstance(response, dict):
+                monitoring_result = response.get("monitoring_result", {})
+                analysis_id = monitoring_result.get("id")
+                return str(analysis_id) if analysis_id is not None else None
+        except Exception:
+            return None
+        return None
+
+    @staticmethod
+    def _set_blocked(response: Any, blocked: Optional[bool]) -> Any:
+        """Sets 'blocked' flag on response when possible."""
+        try:
+            if isinstance(response, dict):
+                response["blocked"] = blocked
+                return response
+            if hasattr(response, "blocked"):
+                setattr(response, "blocked", blocked)
+                return response
+        except Exception:
+            return response
+        return response
+
     def _handle_http_error(self, error: httpx.HTTPStatusError) -> HivetraceResponse:
         return ErrorHandler.handle_http_error(error)

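`_prepare_files_param` simply re-keys each `(filename, bytes, mime_type)` tuple under a common multipart field name. A quick illustration of the resulting shape, which httpx accepts directly as its `files=` argument (values are hypothetical):

```python
# Hypothetical values; "attached_files" is the default field name shown above.
files = [
    ("doc1.txt", b"hello", "text/plain"),
    ("doc2.txt", b"world", "text/plain"),
]
files_param = [("attached_files", f) for f in files]
# files_param == [
#     ("attached_files", ("doc1.txt", b"hello", "text/plain")),
#     ("attached_files", ("doc2.txt", b"world", "text/plain")),
# ]
# httpx sends each entry as a separate multipart form part named "attached_files".
```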
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/client/sync_client.py

@@ -1,5 +1,5 @@
 import weakref
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Tuple, Union

 import httpx

@@ -67,27 +67,92 @@ class SyncHivetraceSDK(BaseHivetraceSDK):
         except Exception as e:
             return ErrorHandler.handle_unexpected_error(e)

+    def _send_files(
+        self, endpoint: str, files: List[Tuple[str, bytes, str]]
+    ) -> HivetraceResponse:
+        request_args = self._build_files_request_args(endpoint, files)
+        try:
+            response = self.session.post(**request_args)
+            response.raise_for_status()
+
+            api_data = response.json()
+            return ResponseBuilder.build_response_from_api(api_data)
+
+        except httpx.HTTPStatusError as e:
+            return ErrorHandler.handle_http_error(e)
+        except httpx.ConnectError as e:
+            return ErrorHandler.handle_connection_error(e)
+        except httpx.TimeoutException as e:
+            return ErrorHandler.handle_timeout_error(e)
+        except httpx.RequestError as e:
+            return ErrorHandler.handle_request_error(e)
+        except ValueError as e:
+            return ErrorHandler.handle_json_decode_error(e)
+        except Exception as e:
+            return ErrorHandler.handle_unexpected_error(e)
+
+    def _get_blocking_status(self, endpoint: str) -> Optional[bool]:
+        url = f"{self.hivetrace_url}/{endpoint.lstrip('/')}"
+        headers = {"Authorization": f"Bearer {self.hivetrace_access_token}"}
+        try:
+            response = self.session.get(
+                url, headers=headers, timeout=self._DEFAULT_TIMEOUT
+            )
+            response.raise_for_status()
+            data = response.json()
+            return data.get("blocked")
+        except Exception:
+            return None
+
     def input(
         self,
         application_id: str,
         message: str,
         additional_parameters: Optional[Dict[str, Any]] = None,
+        files: Optional[List[Tuple[str, bytes, str]]] = None,
     ) -> HivetraceResponse:
         payload = self._build_message_payload(
             application_id, message, additional_parameters
         )
-        return self._send_request("/process_request/", payload)
+        process_response = self._send_request("/process_request/", payload)
+        if files:
+            analysis_id = self._extract_analysis_id(process_response)
+            if analysis_id:
+                self._send_files(
+                    f"/user_prompt_analysis/{analysis_id}/attach_files", files
+                )
+        analysis_id = self._extract_analysis_id(process_response)
+        if analysis_id:
+            blocked = self._get_blocking_status(
+                f"/user_prompt_analysis/{analysis_id}/check_blocking"
+            )
+            self._set_blocked(process_response, blocked)
+        return process_response

     def output(
         self,
         application_id: str,
         message: str,
         additional_parameters: Optional[Dict[str, Any]] = None,
+        files: Optional[List[Tuple[str, bytes, str]]] = None,
     ) -> HivetraceResponse:
         payload = self._build_message_payload(
             application_id, message, additional_parameters
         )
-        return self._send_request("/process_response/", payload)
+        process_response = self._send_request("/process_response/", payload)
+        if files:
+            analysis_id = self._extract_analysis_id(process_response)
+            if analysis_id:
+                self._send_files(
+                    f"/llm_response_analysis/{analysis_id}/attach_files", files
+                )
+        analysis_id = self._extract_analysis_id(process_response)
+        if analysis_id:
+            blocked = self._get_blocking_status(
+                f"/llm_response_analysis/{analysis_id}/check_blocking"
+            )
+            self._set_blocked(process_response, blocked)
+        return process_response

     def function_call(
         self,
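
Put together, `input()` with files now performs up to three HTTP calls. The sketch below approximates that sequence with raw httpx to make the endpoint flow explicit; it is a simplification, since the real SDK builds requests through its own helpers and error handlers, and the exact payload encoding of `/process_request/` is not shown in this diff:

```python
# Simplified approximation of the new SyncHivetraceSDK.input() flow; payload
# encoding and error handling are assumptions, the endpoints come from the diff.
import httpx

def send_prompt_with_files(base_url: str, token: str, payload: dict, files: list) -> dict:
    headers = {"Authorization": f"Bearer {token}"}
    with httpx.Client(base_url=base_url, headers=headers, timeout=30) as http:
        # 1) create the user prompt analysis
        result = http.post("/process_request/", json=payload).json()
        analysis_id = (result.get("monitoring_result") or {}).get("id")
        # 2) attach files to the analysis record, if one was created
        if analysis_id and files:
            http.post(
                f"/user_prompt_analysis/{analysis_id}/attach_files",
                files=[("attached_files", f) for f in files],
            )
        # 3) ask whether role policy blocks the prompt and surface the flag
        if analysis_id:
            check = http.get(f"/user_prompt_analysis/{analysis_id}/check_blocking")
            result["blocked"] = check.json().get("blocked")
        return result
```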
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace/models/responses.py

@@ -24,6 +24,10 @@ class ProcessResponse(SuccessResponse):

     message_id: Optional[str] = Field(None, description="ID of processed message")
     trace_id: Optional[str] = Field(None, description="Trace ID")
+    blocked: Optional[bool] = Field(
+        None,
+        description="Role restriction flag. True if message/response is blocked by policy.",
+    )


 class ErrorResponse(BaseResponse):
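
Because the new field is `Optional[bool]`, callers should treat it as tri-state. A short sketch of the distinction, assuming `response` is whatever `input()`/`output()` returned (dict or response model):

```python
# `response` stands for the value returned by client.input() / client.output().
blocked = (
    response.get("blocked")
    if isinstance(response, dict)
    else getattr(response, "blocked", None)
)
if blocked is True:
    ...  # rejected by role policy
elif blocked is False:
    ...  # explicitly allowed
else:
    ...  # None: status could not be determined (no analysis id or check_blocking failed)
```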
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/PKG-INFO

Identical to the PKG-INFO diff above: version bump to 1.3.9, file-attachment examples, the `files` parameter on `input()`/`output()`, the `blocked` flag in responses, and the new License section.
{hivetrace-1.3.7 → hivetrace-1.3.9}/hivetrace.egg-info/requires.txt

@@ -1,4 +1,5 @@
 httpx>=0.28.1
+pydantic>=2.11.7
 python-dotenv>=1.0.1

 [all]
@@ -9,10 +10,12 @@ langchain-openai==0.2.5
 langchain==0.3.19
 langchain_experimental==0.3.4
 openai-agents>=0.1.0
+pydantic>=2.11.7
 python-dotenv>=1.0.1

 [base]
 httpx>=0.28.1
+pydantic>=2.11.7
 python-dotenv>=1.0.1

 [crewai]
{hivetrace-1.3.7 → hivetrace-1.3.9}/setup.py

@@ -9,6 +9,7 @@ def readme():
 base_requires = [
     "httpx>=0.28.1",
     "python-dotenv>=1.0.1",
+    "pydantic>=2.11.7",
 ]

 langchain_requires = [
@@ -28,7 +29,7 @@ openai_agents_requires = [

 setup(
     name="hivetrace",
-    version="1.3.7",
+    version="1.3.9",
     author="Raft",
     author_email="sales@raftds.com",
     description="Hivetrace SDK for monitoring LLM applications",