dhisana 0.0.1.dev116__py3-none-any.whl → 0.0.1.dev236__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- dhisana/schemas/common.py +10 -1
- dhisana/schemas/sales.py +203 -22
- dhisana/utils/add_mapping.py +0 -2
- dhisana/utils/apollo_tools.py +739 -119
- dhisana/utils/built_with_api_tools.py +4 -2
- dhisana/utils/check_email_validity_tools.py +35 -18
- dhisana/utils/check_for_intent_signal.py +1 -2
- dhisana/utils/check_linkedin_url_validity.py +34 -8
- dhisana/utils/clay_tools.py +3 -2
- dhisana/utils/clean_properties.py +1 -4
- dhisana/utils/compose_salesnav_query.py +0 -1
- dhisana/utils/compose_search_query.py +7 -3
- dhisana/utils/composite_tools.py +0 -1
- dhisana/utils/dataframe_tools.py +2 -2
- dhisana/utils/email_body_utils.py +72 -0
- dhisana/utils/email_provider.py +174 -35
- dhisana/utils/enrich_lead_information.py +183 -53
- dhisana/utils/fetch_openai_config.py +129 -0
- dhisana/utils/field_validators.py +1 -1
- dhisana/utils/g2_tools.py +0 -1
- dhisana/utils/generate_content.py +0 -1
- dhisana/utils/generate_email.py +68 -23
- dhisana/utils/generate_email_response.py +294 -46
- dhisana/utils/generate_flow.py +0 -1
- dhisana/utils/generate_linkedin_connect_message.py +9 -2
- dhisana/utils/generate_linkedin_response_message.py +137 -66
- dhisana/utils/generate_structured_output_internal.py +317 -164
- dhisana/utils/google_custom_search.py +150 -44
- dhisana/utils/google_oauth_tools.py +721 -0
- dhisana/utils/google_workspace_tools.py +278 -54
- dhisana/utils/hubspot_clearbit.py +3 -1
- dhisana/utils/hubspot_crm_tools.py +718 -272
- dhisana/utils/instantly_tools.py +3 -1
- dhisana/utils/lusha_tools.py +10 -7
- dhisana/utils/mailgun_tools.py +150 -0
- dhisana/utils/microsoft365_tools.py +447 -0
- dhisana/utils/openai_assistant_and_file_utils.py +121 -177
- dhisana/utils/openai_helpers.py +8 -6
- dhisana/utils/parse_linkedin_messages_txt.py +1 -3
- dhisana/utils/profile.py +37 -0
- dhisana/utils/proxy_curl_tools.py +377 -76
- dhisana/utils/proxycurl_search_leads.py +426 -0
- dhisana/utils/research_lead.py +3 -3
- dhisana/utils/sales_navigator_crawler.py +1 -6
- dhisana/utils/salesforce_crm_tools.py +323 -50
- dhisana/utils/search_router.py +131 -0
- dhisana/utils/search_router_jobs.py +51 -0
- dhisana/utils/sendgrid_tools.py +126 -91
- dhisana/utils/serarch_router_local_business.py +75 -0
- dhisana/utils/serpapi_additional_tools.py +290 -0
- dhisana/utils/serpapi_google_jobs.py +117 -0
- dhisana/utils/serpapi_google_search.py +188 -0
- dhisana/utils/serpapi_local_business_search.py +129 -0
- dhisana/utils/serpapi_search_tools.py +360 -432
- dhisana/utils/serperdev_google_jobs.py +125 -0
- dhisana/utils/serperdev_local_business.py +154 -0
- dhisana/utils/serperdev_search.py +233 -0
- dhisana/utils/smtp_email_tools.py +178 -18
- dhisana/utils/test_connect.py +1603 -130
- dhisana/utils/trasform_json.py +3 -3
- dhisana/utils/web_download_parse_tools.py +0 -1
- dhisana/utils/zoominfo_tools.py +2 -3
- dhisana/workflow/test.py +1 -1
- {dhisana-0.0.1.dev116.dist-info → dhisana-0.0.1.dev236.dist-info}/METADATA +1 -1
- dhisana-0.0.1.dev236.dist-info/RECORD +100 -0
- {dhisana-0.0.1.dev116.dist-info → dhisana-0.0.1.dev236.dist-info}/WHEEL +1 -1
- dhisana-0.0.1.dev116.dist-info/RECORD +0 -83
- {dhisana-0.0.1.dev116.dist-info → dhisana-0.0.1.dev236.dist-info}/entry_points.txt +0 -0
- {dhisana-0.0.1.dev116.dist-info → dhisana-0.0.1.dev236.dist-info}/top_level.txt +0 -0
dhisana/utils/openai_assistant_and_file_utils.py
CHANGED

@@ -1,39 +1,43 @@
+"""
+Vector-store and file helpers that work with **either** OpenAI or Azure OpenAI,
+using the shared factory functions defined in `dhisana.utils.fetch_openai_config`.
+
+Only the client initialisation lines changed; all business logic is untouched.
+"""
+
 import json
 import logging
 import re
 import traceback
-from typing import
+from typing import Any, Dict, List, Optional
 
 from fastapi import HTTPException
-from pydantic import BaseModel
-import openai
-from dhisana.utils.openai_helpers import get_openai_access_token
 
+import openai  # still needed for openai.NotFoundError
+from dhisana.utils.fetch_openai_config import (
+    create_openai_client,  # synchronous client
+)
+
+# ---------------------------------------------------------------------------
+# Vector-store helpers
+# ---------------------------------------------------------------------------
 
-# -----------------------------------------------------------------------------
-# Vector Store Helpers
-# -----------------------------------------------------------------------------
 
 async def create_vector_store(
     vector_store_name: str,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ) -> Dict[str, Any]:
-    """
-
-
-
-    normalized_name = re.sub(r'[^a-z0-9_]+', '_', normalized_name)
-    normalized_name = normalized_name[:64]
-    openai_key = get_openai_access_token(tool_config)
-
-    client = openai.OpenAI(api_key=openai_key)
+    """Create a new vector store and return its metadata."""
+    normalized_name = re.sub(r"[^a-z0-9_]+", "_", vector_store_name.lower())[:64]
+    client = create_openai_client(tool_config)
+
     try:
-
+        vs = client.vector_stores.create(name=normalized_name)
         return {
-            "id":
-            "name":
-            "created_at":
-            "file_count":
+            "id": vs.id,
+            "name": vs.name,
+            "created_at": vs.created_at,
+            "file_count": vs.file_counts.completed,
         }
     except Exception as e:
         logging.error(f"Error creating vector store: {e}\n{traceback.format_exc()}")
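
Note: `create_openai_client` is imported from `dhisana/utils/fetch_openai_config.py`, a file added in this release (+129 lines) whose body is not part of this diff. For orientation only, here is a minimal sketch of what such a factory typically does; the tool_config shape (a list of named integration entries) and the Azure selection logic are assumptions, not the module's actual code.

import os
from typing import Dict, List, Optional

import openai


def create_openai_client(tool_config: Optional[List[Dict]] = None) -> openai.OpenAI:
    """Hypothetical sketch: build a sync OpenAI or Azure OpenAI client."""
    api_key = os.getenv("OPENAI_API_KEY")
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
    if tool_config:
        # Assumed shape: [{"name": "openai", "configuration": [{"name": "apiKey", "value": "..."}]}]
        entry = next((c for c in tool_config if c.get("name") == "openai"), None)
        if entry:
            values = {i["name"]: i.get("value") for i in entry.get("configuration", [])}
            api_key = values.get("apiKey", api_key)
    if azure_endpoint:
        # Azure OpenAI needs an endpoint and an api_version in addition to the key.
        return openai.AzureOpenAI(
            api_key=api_key,
            azure_endpoint=azure_endpoint,
            api_version="2024-06-01",
        )
    return openai.OpenAI(api_key=api_key)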
@@ -42,64 +46,49 @@ async def create_vector_store(
 
 async def delete_vector_store(
     vector_store_id: str,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ) -> None:
-    """
-
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
+    """Delete a vector store by ID."""
+    client = create_openai_client(tool_config)
     try:
         client.vector_stores.delete(vector_store_id=vector_store_id)
     except Exception as e:
         logging.error(f"Error deleting vector store {vector_store_id}: {e}")
         raise HTTPException(status_code=400, detail=str(e))
 
+# ---------------------------------------------------------------------------
+# File-upload helpers
+# ---------------------------------------------------------------------------
 
-# -----------------------------------------------------------------------------
-# File Upload Helpers
-# -----------------------------------------------------------------------------
 
 async def upload_file_openai_and_vector_store(
     file_path_or_bytes: Any,
     file_name: str,
     mime_type: str,
     vector_store_id: str,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ):
-    """
-
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
-    purpose = "assistants"
-    if mime_type in ["image/jpeg", "image/png"]:
-        purpose = "vision"
+    """Upload a file and attach it to a vector store (purpose = assistants / vision)."""
+    client = create_openai_client(tool_config)
+    purpose = "vision" if mime_type in {"image/jpeg", "image/png"} else "assistants"
 
     try:
-
-
-
-
-
-
-
-
-
-
-        else:
-            # raw bytes
-            file_upload = client.files.create(
-                file=(file_name, file_path_or_bytes, mime_type),
-                purpose=purpose
-            )
+        if isinstance(file_path_or_bytes, str):
+            file_upload = client.files.create(
+                file=open(file_path_or_bytes, "rb"),
+                purpose=purpose,
+            )
+        elif isinstance(file_path_or_bytes, bytes):
+            file_upload = client.files.create(
+                file=(file_name, file_path_or_bytes, mime_type),
+                purpose=purpose,
+            )
         else:
             raise ValueError("Unknown file content type. Must be path or bytes.")
 
         if purpose == "assistants" and vector_store_id:
             client.vector_stores.files.create(
-                vector_store_id=vector_store_id,
-                file_id=file_upload.id
+                vector_store_id=vector_store_id, file_id=file_upload.id
             )
         return file_upload
     except Exception as e:
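
A usage sketch for the upload helper above; the vector-store ID and file names are placeholders, and `tool_config=None` falls back to environment-based credentials:

import asyncio

from dhisana.utils.openai_assistant_and_file_utils import (
    upload_file_openai_and_vector_store,
)


async def main() -> None:
    # Path variant: a PDF gets purpose="assistants" and is attached
    # to the vector store so file_search can retrieve it.
    await upload_file_openai_and_vector_store(
        "report.pdf", "report.pdf", "application/pdf",
        vector_store_id="vs_abc123", tool_config=None,
    )

    # Bytes variant: an image MIME type flips purpose to "vision",
    # so the vector-store attachment step is skipped.
    with open("chart.png", "rb") as f:
        png_bytes = f.read()
    await upload_file_openai_and_vector_store(
        png_bytes, "chart.png", "image/png",
        vector_store_id="vs_abc123", tool_config=None,
    )


asyncio.run(main())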
@@ -111,29 +100,22 @@ async def upload_file_openai(
     file_path_or_bytes: Any,
     file_name: str,
     mime_type: str,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ):
-    """
-
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
-
-    purpose = "assistants"
-    if mime_type in ["image/jpeg", "image/png"]:
-        purpose = "vision"
+    """Upload a standalone file (not attached to a vector store)."""
+    client = create_openai_client(tool_config)
+    purpose = "vision" if mime_type in {"image/jpeg", "image/png"} else "assistants"
 
     try:
         if isinstance(file_path_or_bytes, str):
-            # treat as local path
             file_upload = client.files.create(
                 file=open(file_path_or_bytes, "rb"),
-                purpose=purpose
+                purpose=purpose,
             )
         else:
             file_upload = client.files.create(
                 file=(file_name, file_path_or_bytes, mime_type),
-                purpose=purpose
+                purpose=purpose,
             )
         return file_upload
     except Exception as e:
@@ -144,180 +126,142 @@ async def upload_file_openai(
 async def attach_file_to_vector_store(
     file_id: str,
     vector_store_id: str,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ):
-    """
-
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
+    """Attach an already-uploaded file to a vector store."""
+    client = create_openai_client(tool_config)
     try:
-
-            vector_store_id=vector_store_id,
-            file_id=file_id
+        return client.vector_stores.files.create(
+            vector_store_id=vector_store_id, file_id=file_id
         )
-        return response
     except Exception as e:
-        logging.error(
+        logging.error(
+            f"Error attaching file {file_id} to vector store {vector_store_id}: {e}"
+        )
         raise HTTPException(status_code=400, detail=str(e))
 
 
 async def delete_files(
     file_ids: List[str],
     vector_store_id: Optional[str] = None,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ):
-    """
-
-
-
-    client = openai.OpenAI(api_key=openai_key)
-    for file_id in file_ids:
+    """Delete files from vector store (if given) and OpenAI storage."""
+    client = create_openai_client(tool_config)
+
+    for fid in file_ids:
         try:
             if vector_store_id:
-                client.vector_stores.files.delete(
-
+                client.vector_stores.files.delete(
+                    vector_store_id=vector_store_id, file_id=fid
+                )
+            client.files.delete(file_id=fid)
         except openai.NotFoundError:
-            logging.warning(f"File not found: {
+            logging.warning(f"File not found: {fid}")
         except Exception as e:
-            logging.error(f"Error deleting file {
+            logging.error(f"Error deleting file {fid}: {e}\n{traceback.format_exc()}")
 
+# ---------------------------------------------------------------------------
+# RAG / Responses helpers
+# ---------------------------------------------------------------------------
 
-# -----------------------------------------------------------------------------
-# Using File Search (RAG) with the Responses API
-# -----------------------------------------------------------------------------
 
 async def run_file_search(
     query: str,
     vector_store_id: str,
-    model: str = "gpt-
+    model: str = "gpt-5.1-chat",
     max_num_results: int = 5,
     store: bool = True,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ) -> Dict[str, Any]:
-    """
-
-    - 'query': user question
-    - 'vector_store_id': the store where the PDFs are embedded
-    - 'model': which model to use for generating the final answer
-    - 'max_num_results': how many relevant chunks to retrieve
-    - 'store': whether to store the response on the OpenAI side
-    Returns a dict with:
-    {
-        "answer": str, # the text answer from the LLM
-        "retrieved_files": list, # the top-k filenames used
-        "annotations": list # any chunk-level annotations
-    }
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
+    """Single-shot file_search + answer with the new Responses API."""
+    client = create_openai_client(tool_config)
 
     try:
-
+        rsp = client.responses.create(
             input=query,
             model=model,
             store=store,
-
-
-
-
-
-
+            tools=[
+                {
+                    "type": "file_search",
+                    "vector_store_ids": [vector_store_id],
+                    "max_num_results": max_num_results,
+                }
+            ],
         )
 
-
-
-
-
-            fs_content = response.output[1].content[0]
-            answer_text = fs_content.text
-            annotations = fs_content.annotations  # chunk-level info (filename, snippet, etc.)
-
-            retrieved_files = []
-            if annotations:
-                retrieved_files = list({result.filename for result in annotations})
-
+        if len(rsp.output) > 1 and rsp.output[1].content:
+            fs_chunk = rsp.output[1].content[0]
+            annotations = fs_chunk.annotations or []
+            retrieved_files = list({ann.filename for ann in annotations})
             return {
-                "answer":
+                "answer": fs_chunk.text,
                 "retrieved_files": retrieved_files,
-                "annotations": annotations
-            }
-        else:
-            # If for some reason no file_search step was generated:
-            return {
-                "answer": response.output_text,  # fallback
-                "retrieved_files": [],
-                "annotations": []
+                "annotations": annotations,
             }
+
+        return {
+            "answer": rsp.output_text,
+            "retrieved_files": [],
+            "annotations": [],
+        }
     except Exception as e:
         logging.error(f"Error in run_file_search: {e}\n{traceback.format_exc()}")
         raise HTTPException(status_code=400, detail=str(e))
 
 
-# -----------------------------------------------------------------------------
-# Additional "Responses" Helpers (e.g., run_response_text, run_response_structured)
-# -----------------------------------------------------------------------------
-
 async def run_response_text(
     prompt: str,
-    model: str = "gpt-
+    model: str = "gpt-5.1-chat",
     max_tokens: int = 2048,
     store: bool = True,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ) -> (str, str):
-    """
-
-    Returns (answer, status).
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
+    """Plain text completion via the Responses API."""
+    client = create_openai_client(tool_config)
 
     try:
-
+        rsp = client.responses.create(
             input=[{"role": "user", "content": prompt}],
             model=model,
             max_tokens=max_tokens,
-            store=store
+            store=store,
         )
-        return
+        return rsp.output_text, "success"
     except Exception as e:
         logging.error(f"Error in run_response_text: {e}\n{traceback.format_exc()}")
-        return
+        return f"An error occurred: {e}", "error"
 
 
 async def run_response_structured(
     prompt: str,
     response_format: dict,
-    model: str = "gpt-
+    model: str = "gpt-5.1-chat",
     max_tokens: int = 1024,
     store: bool = True,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
 ) -> (Any, str):
-    """
-
-    """
-    openai_key = get_openai_access_token(tool_config)
-    client = openai.OpenAI(api_key=openai_key)
+    """Structured JSON output via Responses API."""
+    client = create_openai_client(tool_config)
 
     try:
-
+        rsp = client.responses.create(
             input=[{"role": "user", "content": prompt}],
             model=model,
             max_tokens=max_tokens,
             store=store,
-            text={"format": response_format}
+            text={"format": response_format},
         )
-
-
-            raw_text = response.output[0].content[0].text
+        if rsp.output:
+            raw = rsp.output[0].content[0].text
             try:
-
-                return parsed, "success"
+                return json.loads(raw), "success"
             except json.JSONDecodeError:
-
-
-        else:
-            return "No output returned", "error"
+                return raw, "error"
+        return "No output returned", "error"
     except Exception as e:
-        logging.error(
+        logging.error(
+            f"Error in run_response_structured: {e}\n{traceback.format_exc()}"
+        )
         return f"An error occurred: {e}", "error"
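
A usage sketch for the Responses-API helpers above, assuming they are imported from this module. The vector-store ID is a placeholder, and the `response_format` dict follows the `{"type": "json_schema", ...}` shape that the Responses API's `text.format` parameter expects, since the helper passes it through unchanged:

import asyncio

from dhisana.utils.openai_assistant_and_file_utils import (
    run_file_search,
    run_response_structured,
)


async def main() -> None:
    # RAG query: returns {"answer", "retrieved_files", "annotations"}.
    rag = await run_file_search(
        query="What were Q3 revenues?",
        vector_store_id="vs_abc123",
        max_num_results=5,
    )
    print(rag["answer"], rag["retrieved_files"])

    # Structured output: returns a (parsed_json, status) tuple.
    schema = {
        "type": "json_schema",
        "name": "revenue",
        "schema": {
            "type": "object",
            "properties": {"revenue_usd": {"type": "number"}},
            "required": ["revenue_usd"],
            "additionalProperties": False,
        },
    }
    parsed, status = await run_response_structured(
        prompt="Extract the revenue from: 'Q3 revenue was $1.2M'.",
        response_format=schema,
    )
    print(status, parsed)


asyncio.run(main())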
dhisana/utils/openai_helpers.py
CHANGED
@@ -12,7 +12,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Type
 
 from pydantic import BaseModel, Field, create_model
 from fastapi import HTTPException
-from openai import AsyncOpenAI, OpenAIError,
+from openai import AsyncOpenAI, OpenAIError, pydantic_function_tool
 
 from dhisana.utils import cache_output_tools
 # from dhisana.utils.trasform_json import GLOBAL_GENERATED_PYTHON_CODE
@@ -44,7 +44,7 @@ def get_openai_access_token(tool_config: Optional[List[Dict]] = None) -> str:
         str: The OPENAI_API_KEY access token.
 
     Raises:
-        ValueError: If the
+        ValueError: If the OpenAI integration has not been configured.
     """
     if tool_config:
         openai_config = next(
@@ -64,7 +64,9 @@ def get_openai_access_token(tool_config: Optional[List[Dict]] = None) -> str:
 
     OPENAI_API_KEY = OPENAI_API_KEY or os.getenv("OPENAI_API_KEY")
     if not OPENAI_API_KEY:
-        raise ValueError(
+        raise ValueError(
+            "OpenAI integration is not configured. Please configure the connection to OpenAI in Integrations."
+        )
     return OPENAI_API_KEY
 
 async def read_from_google_drive(path):
@@ -328,7 +330,7 @@ async def process_agent_request(row_batch: List[Dict], workflow: Dict, custom_in
         name="AI Assistant",
         instructions=instructions,
         tools=[],
-        model="
+        model="gpt-5.1-chat"
     )
     thread = await client.beta.threads.create()
     parsed_outputs = []
@@ -480,7 +482,7 @@ async def process_transform_json(task_inputs, response_type, task):
     task_id = task.get('id')
     for input_name, input_info in task_inputs.items():
         data_format = input_info.get('format', 'list')
-
+        input_info.get('transform_function_name', f"{task_id}_transform_input_json")
         items = input_info.get('data')
         if data_format == 'list':
             if items and len(items) > 0:
@@ -953,7 +955,7 @@ async def get_function_call_arguments(input_text: str, function_name: str) -> Tu
 
     # Make the API call
     response = await client.beta.chat.completions.parse(
-        model="
+        model="gpt-5.1-chat",
         messages=[
             {"role": "system", "content": "Extract function arguments in JSON format."},
             {"role": "user", "content": prompt},
dhisana/utils/profile.py
ADDED
@@ -0,0 +1,37 @@
+from __future__ import annotations
+from pyinstrument import Profiler
+import logging
+import re
+from typing import Any, Awaitable, Dict, List, Optional, TypeVar, Union
+import mdformat
+
+# --------------------------------------------------------------------------- #
+# Helper: profile any awaited coroutine and log the timing with its call‑site #
+# --------------------------------------------------------------------------- #
+T = TypeVar("T")
+logger = logging.getLogger(__name__)
+
+
+async def profile_async_call(awaitable: Awaitable[T], name: str) -> T:
+    """
+    Run *awaitable*, timing it with an ad‑hoc Profiler instance,
+    and log the profiler output.
+
+    Args:
+        awaitable: The coroutine to time.
+        name: Friendly name to show in the log (e.g. function name).
+
+    Returns:
+        The awaited result, typed to whatever the coroutine yields.
+    """
+    profiler = Profiler()  # noqa: F821 (assumes Profiler is already imported)
+    profiler.start()
+    result: T = await awaitable
+    profiler.stop()
+
+    logger.debug(
+        "⏱️ Profiled %s\n%s",
+        name,
+        profiler.output_text(unicode=True, color=True),
+    )
+    return result