dhisana 0.0.1.dev85__py3-none-any.whl → 0.0.1.dev236__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. dhisana/schemas/common.py +33 -0
  2. dhisana/schemas/sales.py +224 -23
  3. dhisana/utils/add_mapping.py +72 -63
  4. dhisana/utils/apollo_tools.py +739 -109
  5. dhisana/utils/built_with_api_tools.py +4 -2
  6. dhisana/utils/cache_output_tools.py +23 -23
  7. dhisana/utils/check_email_validity_tools.py +456 -458
  8. dhisana/utils/check_for_intent_signal.py +1 -2
  9. dhisana/utils/check_linkedin_url_validity.py +34 -8
  10. dhisana/utils/clay_tools.py +3 -2
  11. dhisana/utils/clean_properties.py +3 -1
  12. dhisana/utils/compose_salesnav_query.py +0 -1
  13. dhisana/utils/compose_search_query.py +7 -3
  14. dhisana/utils/composite_tools.py +0 -1
  15. dhisana/utils/dataframe_tools.py +2 -2
  16. dhisana/utils/email_body_utils.py +72 -0
  17. dhisana/utils/email_provider.py +375 -0
  18. dhisana/utils/enrich_lead_information.py +585 -85
  19. dhisana/utils/fetch_openai_config.py +129 -0
  20. dhisana/utils/field_validators.py +1 -1
  21. dhisana/utils/g2_tools.py +0 -1
  22. dhisana/utils/generate_content.py +0 -1
  23. dhisana/utils/generate_email.py +69 -16
  24. dhisana/utils/generate_email_response.py +298 -41
  25. dhisana/utils/generate_flow.py +0 -1
  26. dhisana/utils/generate_linkedin_connect_message.py +19 -6
  27. dhisana/utils/generate_linkedin_response_message.py +156 -65
  28. dhisana/utils/generate_structured_output_internal.py +351 -131
  29. dhisana/utils/google_custom_search.py +150 -44
  30. dhisana/utils/google_oauth_tools.py +721 -0
  31. dhisana/utils/google_workspace_tools.py +391 -25
  32. dhisana/utils/hubspot_clearbit.py +3 -1
  33. dhisana/utils/hubspot_crm_tools.py +771 -167
  34. dhisana/utils/instantly_tools.py +3 -1
  35. dhisana/utils/lusha_tools.py +10 -7
  36. dhisana/utils/mailgun_tools.py +150 -0
  37. dhisana/utils/microsoft365_tools.py +447 -0
  38. dhisana/utils/openai_assistant_and_file_utils.py +121 -177
  39. dhisana/utils/openai_helpers.py +19 -16
  40. dhisana/utils/parse_linkedin_messages_txt.py +2 -3
  41. dhisana/utils/profile.py +37 -0
  42. dhisana/utils/proxy_curl_tools.py +507 -206
  43. dhisana/utils/proxycurl_search_leads.py +426 -0
  44. dhisana/utils/research_lead.py +121 -68
  45. dhisana/utils/sales_navigator_crawler.py +1 -6
  46. dhisana/utils/salesforce_crm_tools.py +323 -50
  47. dhisana/utils/search_router.py +131 -0
  48. dhisana/utils/search_router_jobs.py +51 -0
  49. dhisana/utils/sendgrid_tools.py +126 -91
  50. dhisana/utils/serarch_router_local_business.py +75 -0
  51. dhisana/utils/serpapi_additional_tools.py +290 -0
  52. dhisana/utils/serpapi_google_jobs.py +117 -0
  53. dhisana/utils/serpapi_google_search.py +188 -0
  54. dhisana/utils/serpapi_local_business_search.py +129 -0
  55. dhisana/utils/serpapi_search_tools.py +363 -432
  56. dhisana/utils/serperdev_google_jobs.py +125 -0
  57. dhisana/utils/serperdev_local_business.py +154 -0
  58. dhisana/utils/serperdev_search.py +233 -0
  59. dhisana/utils/smtp_email_tools.py +576 -0
  60. dhisana/utils/test_connect.py +1765 -92
  61. dhisana/utils/trasform_json.py +95 -16
  62. dhisana/utils/web_download_parse_tools.py +0 -1
  63. dhisana/utils/zoominfo_tools.py +2 -3
  64. dhisana/workflow/test.py +1 -1
  65. {dhisana-0.0.1.dev85.dist-info → dhisana-0.0.1.dev236.dist-info}/METADATA +5 -2
  66. dhisana-0.0.1.dev236.dist-info/RECORD +100 -0
  67. {dhisana-0.0.1.dev85.dist-info → dhisana-0.0.1.dev236.dist-info}/WHEEL +1 -1
  68. dhisana-0.0.1.dev85.dist-info/RECORD +0 -81
  69. {dhisana-0.0.1.dev85.dist-info → dhisana-0.0.1.dev236.dist-info}/entry_points.txt +0 -0
  70. {dhisana-0.0.1.dev85.dist-info → dhisana-0.0.1.dev236.dist-info}/top_level.txt +0 -0
dhisana/utils/openai_assistant_and_file_utils.py

@@ -1,39 +1,43 @@
+ """
+ Vector-store and file helpers that work with **either** OpenAI or Azure OpenAI,
+ using the shared factory functions defined in `dhisana.utils.fetch_openai_config`.
+
+ Only the client initialisation lines changed; all business logic is untouched.
+ """
+
  import json
  import logging
  import re
  import traceback
- from typing import Optional, List, Any, Dict
+ from typing import Any, Dict, List, Optional
 
  from fastapi import HTTPException
- from pydantic import BaseModel
- import openai
- from dhisana.utils.openai_helpers import get_openai_access_token
 
+ import openai  # still needed for openai.NotFoundError
+ from dhisana.utils.fetch_openai_config import (
+     create_openai_client,  # synchronous client
+ )
+
+ # ---------------------------------------------------------------------------
+ # Vector-store helpers
+ # ---------------------------------------------------------------------------
 
- # -----------------------------------------------------------------------------
- # Vector Store Helpers
- # -----------------------------------------------------------------------------
 
  async def create_vector_store(
      vector_store_name: str,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ) -> Dict[str, Any]:
-     """
-     Create a new vector store with a cleaned name. Returns the vector store details.
-     """
-     normalized_name = vector_store_name.lower()
-     normalized_name = re.sub(r'[^a-z0-9_]+', '_', normalized_name)
-     normalized_name = normalized_name[:64]
-     openai_key = get_openai_access_token(tool_config)
-
-     client = openai.OpenAI(api_key=openai_key)
+     """Create a new vector store and return its metadata."""
+     normalized_name = re.sub(r"[^a-z0-9_]+", "_", vector_store_name.lower())[:64]
+     client = create_openai_client(tool_config)
+
      try:
-         vector_store = client.vector_stores.create(name=normalized_name)
+         vs = client.vector_stores.create(name=normalized_name)
          return {
-             "id": vector_store.id,
-             "name": vector_store.name,
-             "created_at": vector_store.created_at,
-             "file_count": vector_store.file_counts.completed
+             "id": vs.id,
+             "name": vs.name,
+             "created_at": vs.created_at,
+             "file_count": vs.file_counts.completed,
          }
      except Exception as e:
          logging.error(f"Error creating vector store: {e}\n{traceback.format_exc()}")
@@ -42,64 +46,49 @@ async def create_vector_store(
 
  async def delete_vector_store(
      vector_store_id: str,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ) -> None:
-     """
-     Delete a vector store by ID.
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
+     """Delete a vector store by ID."""
+     client = create_openai_client(tool_config)
      try:
          client.vector_stores.delete(vector_store_id=vector_store_id)
      except Exception as e:
          logging.error(f"Error deleting vector store {vector_store_id}: {e}")
          raise HTTPException(status_code=400, detail=str(e))
 
+ # ---------------------------------------------------------------------------
+ # File-upload helpers
+ # ---------------------------------------------------------------------------
 
- # -----------------------------------------------------------------------------
- # File Upload Helpers
- # -----------------------------------------------------------------------------
 
  async def upload_file_openai_and_vector_store(
      file_path_or_bytes: Any,
      file_name: str,
      mime_type: str,
      vector_store_id: str,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ):
-     """
-     Upload a local file or bytes to OpenAI, then attach to a vector store.
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
-     purpose = "assistants"
-     if mime_type in ["image/jpeg", "image/png"]:
-         purpose = "vision"
+     """Upload a file and attach it to a vector store (purpose = assistants / vision)."""
+     client = create_openai_client(tool_config)
+     purpose = "vision" if mime_type in {"image/jpeg", "image/png"} else "assistants"
 
      try:
-         # file=(filename, file_content, mime_type) if bytes
-         # or file=open(file_path, "rb") if local path
-         file_upload = None
-         if isinstance(file_path_or_bytes, (str, bytes)):
-             # If string path, open as binary
-             if isinstance(file_path_or_bytes, str):
-                 file_upload = client.files.create(
-                     file=open(file_path_or_bytes, "rb"),
-                     purpose=purpose
-                 )
-             else:
-                 # raw bytes
-                 file_upload = client.files.create(
-                     file=(file_name, file_path_or_bytes, mime_type),
-                     purpose=purpose
-                 )
+         if isinstance(file_path_or_bytes, str):
+             file_upload = client.files.create(
+                 file=open(file_path_or_bytes, "rb"),
+                 purpose=purpose,
+             )
+         elif isinstance(file_path_or_bytes, bytes):
+             file_upload = client.files.create(
+                 file=(file_name, file_path_or_bytes, mime_type),
+                 purpose=purpose,
+             )
          else:
              raise ValueError("Unknown file content type. Must be path or bytes.")
 
          if purpose == "assistants" and vector_store_id:
              client.vector_stores.files.create(
-                 vector_store_id=vector_store_id,
-                 file_id=file_upload.id
+                 vector_store_id=vector_store_id, file_id=file_upload.id
              )
          return file_upload
      except Exception as e:
@@ -111,29 +100,22 @@ async def upload_file_openai(
      file_path_or_bytes: Any,
      file_name: str,
      mime_type: str,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ):
-     """
-     Upload a file to OpenAI (not attached to a vector store).
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
-
-     purpose = "assistants"
-     if mime_type in ["image/jpeg", "image/png"]:
-         purpose = "vision"
+     """Upload a standalone file (not attached to a vector store)."""
+     client = create_openai_client(tool_config)
+     purpose = "vision" if mime_type in {"image/jpeg", "image/png"} else "assistants"
 
      try:
          if isinstance(file_path_or_bytes, str):
-             # treat as local path
              file_upload = client.files.create(
                  file=open(file_path_or_bytes, "rb"),
-                 purpose=purpose
+                 purpose=purpose,
              )
          else:
              file_upload = client.files.create(
                  file=(file_name, file_path_or_bytes, mime_type),
-                 purpose=purpose
+                 purpose=purpose,
              )
          return file_upload
      except Exception as e:
@@ -144,180 +126,142 @@ async def upload_file_openai(
  async def attach_file_to_vector_store(
      file_id: str,
      vector_store_id: str,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ):
-     """
-     Attach an already uploaded file to a vector store.
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
+     """Attach an already-uploaded file to a vector store."""
+     client = create_openai_client(tool_config)
      try:
-         response = client.vector_stores.files.create(
-             vector_store_id=vector_store_id,
-             file_id=file_id
+         return client.vector_stores.files.create(
+             vector_store_id=vector_store_id, file_id=file_id
          )
-         return response
      except Exception as e:
-         logging.error(f"Error attaching file {file_id} to vector store {vector_store_id}: {e}")
+         logging.error(
+             f"Error attaching file {file_id} to vector store {vector_store_id}: {e}"
+         )
          raise HTTPException(status_code=400, detail=str(e))
 
 
  async def delete_files(
      file_ids: List[str],
      vector_store_id: Optional[str] = None,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ):
-     """
-     Delete files from the vector store and from OpenAI's file storage.
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
-     for file_id in file_ids:
+     """Delete files from vector store (if given) and OpenAI storage."""
+     client = create_openai_client(tool_config)
+
+     for fid in file_ids:
          try:
              if vector_store_id:
-                 client.vector_stores.files.delete(vector_store_id=vector_store_id, file_id=file_id)
-             client.files.delete(file_id=file_id)
+                 client.vector_stores.files.delete(
+                     vector_store_id=vector_store_id, file_id=fid
+                 )
+             client.files.delete(file_id=fid)
          except openai.NotFoundError:
-             logging.warning(f"File not found: {file_id}")
+             logging.warning(f"File not found: {fid}")
          except Exception as e:
-             logging.error(f"Error deleting file {file_id}: {e}\n{traceback.format_exc()}")
+             logging.error(f"Error deleting file {fid}: {e}\n{traceback.format_exc()}")
 
+ # ---------------------------------------------------------------------------
+ # RAG / Responses helpers
+ # ---------------------------------------------------------------------------
 
- # -----------------------------------------------------------------------------
- # Using File Search (RAG) with the Responses API
- # -----------------------------------------------------------------------------
 
  async def run_file_search(
      query: str,
      vector_store_id: str,
-     model: str = "gpt-4o-mini",
+     model: str = "gpt-5.1-chat",
      max_num_results: int = 5,
      store: bool = True,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ) -> Dict[str, Any]:
-     """
-     Execute a single call to the new OpenAI Responses API, using the file_search tool.
-     - 'query': user question
-     - 'vector_store_id': the store where the PDFs are embedded
-     - 'model': which model to use for generating the final answer
-     - 'max_num_results': how many relevant chunks to retrieve
-     - 'store': whether to store the response on the OpenAI side
-     Returns a dict with:
-         {
-             "answer": str,            # the text answer from the LLM
-             "retrieved_files": list,  # the top-k filenames used
-             "annotations": list       # any chunk-level annotations
-         }
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
+     """Single-shot file_search + answer with the new Responses API."""
+     client = create_openai_client(tool_config)
 
      try:
-         response = client.responses.create(
+         rsp = client.responses.create(
              input=query,
              model=model,
              store=store,
-             # Provide the file_search tool with vector_store_ids
-             tools=[{
-                 "type": "file_search",
-                 "vector_store_ids": [vector_store_id],
-                 "max_num_results": max_num_results,
-             }]
+             tools=[
+                 {
+                     "type": "file_search",
+                     "vector_store_ids": [vector_store_id],
+                     "max_num_results": max_num_results,
+                 }
+             ],
          )
 
-         # By default, the file_search call is placed into output[1].
-         # The first chunk in output[1].content can contain both text and annotations.
-         if len(response.output) > 1 and response.output[1].content:
-             # The text & annotations from the file_search step:
-             fs_content = response.output[1].content[0]
-             answer_text = fs_content.text
-             annotations = fs_content.annotations  # chunk-level info (filename, snippet, etc.)
-
-             retrieved_files = []
-             if annotations:
-                 retrieved_files = list({result.filename for result in annotations})
-
+         if len(rsp.output) > 1 and rsp.output[1].content:
+             fs_chunk = rsp.output[1].content[0]
+             annotations = fs_chunk.annotations or []
+             retrieved_files = list({ann.filename for ann in annotations})
              return {
-                 "answer": answer_text,
+                 "answer": fs_chunk.text,
                  "retrieved_files": retrieved_files,
-                 "annotations": annotations
-             }
-         else:
-             # If for some reason no file_search step was generated:
-             return {
-                 "answer": response.output_text,  # fallback
-                 "retrieved_files": [],
-                 "annotations": []
+                 "annotations": annotations,
              }
+
+         return {
+             "answer": rsp.output_text,
+             "retrieved_files": [],
+             "annotations": [],
+         }
      except Exception as e:
          logging.error(f"Error in run_file_search: {e}\n{traceback.format_exc()}")
          raise HTTPException(status_code=400, detail=str(e))
 
 
- # -----------------------------------------------------------------------------
- # Additional “Responses” Helpers (e.g., run_response_text, run_response_structured)
- # -----------------------------------------------------------------------------
-
  async def run_response_text(
      prompt: str,
-     model: str = "gpt-4o-mini",
+     model: str = "gpt-5.1-chat",
      max_tokens: int = 2048,
      store: bool = True,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ) -> (str, str):
-     """
-     Simple text completion with the new Responses API.
-     Returns (answer, status).
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
+     """Plain text completion via the Responses API."""
+     client = create_openai_client(tool_config)
 
      try:
-         response = client.responses.create(
+         rsp = client.responses.create(
              input=[{"role": "user", "content": prompt}],
              model=model,
              max_tokens=max_tokens,
-             store=store
+             store=store,
          )
-         return response.output_text, "success"
+         return rsp.output_text, "success"
      except Exception as e:
          logging.error(f"Error in run_response_text: {e}\n{traceback.format_exc()}")
-         return (f"An error occurred: {e}", "error")
+         return f"An error occurred: {e}", "error"
 
 
  async def run_response_structured(
      prompt: str,
      response_format: dict,
-     model: str = "gpt-4o-mini",
+     model: str = "gpt-5.1-chat",
      max_tokens: int = 1024,
      store: bool = True,
-     tool_config: Optional[List[Dict]] = None
+     tool_config: Optional[List[Dict]] = None,
  ) -> (Any, str):
-     """
-     Structured output example. Provide a JSON schema or other format in text={"format": ...}.
-     """
-     openai_key = get_openai_access_token(tool_config)
-     client = openai.OpenAI(api_key=openai_key)
+     """Structured JSON output via Responses API."""
+     client = create_openai_client(tool_config)
 
      try:
-         response = client.responses.create(
+         rsp = client.responses.create(
              input=[{"role": "user", "content": prompt}],
              model=model,
              max_tokens=max_tokens,
              store=store,
-             text={"format": response_format}
+             text={"format": response_format},
          )
-         # If we assume the JSON is in the first output chunk’s .text
-         if response.output and len(response.output) > 0:
-             raw_text = response.output[0].content[0].text
+         if rsp.output:
+             raw = rsp.output[0].content[0].text
              try:
-                 parsed = json.loads(raw_text)
-                 return parsed, "success"
+                 return json.loads(raw), "success"
              except json.JSONDecodeError:
-                 # Possibly the model returned partial or invalid JSON
-                 return raw_text, "error"
-         else:
-             return "No output returned", "error"
+                 return raw, "error"
+         return "No output returned", "error"
      except Exception as e:
-         logging.error(f"Error in run_response_structured: {e}\n{traceback.format_exc()}")
+         logging.error(
+             f"Error in run_response_structured: {e}\n{traceback.format_exc()}"
+         )
          return f"An error occurred: {e}", "error"
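Every helper in this file now obtains its client from `create_openai_client` instead of constructing `openai.OpenAI(api_key=...)` directly. The factory itself lives in `dhisana/utils/fetch_openai_config.py` (added in this release, see the file list) and is not shown in this diff; below is a minimal sketch of what such a factory could look like, assuming the tool_config entries carry an API key plus optional Azure settings. The config field names (`apiKey`, `azureEndpoint`) are illustrative, not the package's actual schema.

import os
from typing import Dict, List, Optional

from openai import AzureOpenAI, OpenAI


def create_openai_client(tool_config: Optional[List[Dict]] = None):
    """Return an OpenAI or Azure OpenAI client from configuration (illustrative sketch)."""
    cfg: Dict[str, str] = {}
    if tool_config:
        # Hypothetical tool_config shape: the real lookup is not shown in this diff.
        entry = next((t for t in tool_config if t.get("name") == "openai"), None)
        if entry:
            cfg = {c["name"]: c["value"] for c in entry.get("configuration", [])}
    api_key = cfg.get("apiKey") or os.getenv("OPENAI_API_KEY")
    azure_endpoint = cfg.get("azureEndpoint") or os.getenv("AZURE_OPENAI_ENDPOINT")
    if azure_endpoint:
        # Azure OpenAI needs an endpoint and API version in addition to the key.
        return AzureOpenAI(
            api_key=api_key,
            azure_endpoint=azure_endpoint,
            api_version="2024-06-01",
        )
    return OpenAI(api_key=api_key)

Centralising client construction this way is what lets the module docstring claim that only the initialisation lines changed.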
dhisana/utils/openai_helpers.py

@@ -12,10 +12,10 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Type
 
  from pydantic import BaseModel, Field, create_model
  from fastapi import HTTPException
- from openai import AsyncOpenAI, OpenAIError, LengthFinishReasonError, pydantic_function_tool
+ from openai import AsyncOpenAI, OpenAIError, pydantic_function_tool
 
  from dhisana.utils import cache_output_tools
- from dhisana.utils.trasform_json import GLOBAL_GENERATED_PYTHON_CODE, transform_json_with_type
+ # from dhisana.utils.trasform_json import GLOBAL_GENERATED_PYTHON_CODE
 
  from .agent_tools import GLOBAL_DATA_MODELS, GLOBAL_TOOLS_FUNCTIONS
  from .google_workspace_tools import get_file_content_from_googledrive_by_name, write_content_to_googledrive
@@ -44,7 +44,7 @@ def get_openai_access_token(tool_config: Optional[List[Dict]] = None) -> str:
          str: The OPENAI_API_KEY access token.
 
      Raises:
-         ValueError: If the access token is not found in the tool configuration or environment variable.
+         ValueError: If the OpenAI integration has not been configured.
      """
      if tool_config:
          openai_config = next(
@@ -64,7 +64,9 @@ def get_openai_access_token(tool_config: Optional[List[Dict]] = None) -> str:
 
      OPENAI_API_KEY = OPENAI_API_KEY or os.getenv("OPENAI_API_KEY")
      if not OPENAI_API_KEY:
-         raise ValueError("OPENAI_API_KEY access token not found in tool_config or environment variable")
+         raise ValueError(
+             "OpenAI integration is not configured. Please configure the connection to OpenAI in Integrations."
+         )
      return OPENAI_API_KEY
 
  async def read_from_google_drive(path):
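As the hunks above show, `get_openai_access_token` tries the tool_config first and falls back to the `OPENAI_API_KEY` environment variable. A short usage sketch; the tool_config shape below is a guess for illustration only, since the lookup body is only partially visible in this diff:

import os

from dhisana.utils.openai_helpers import get_openai_access_token

# Environment fallback: no tool_config entry, key comes from OPENAI_API_KEY.
os.environ["OPENAI_API_KEY"] = "sk-placeholder"
token = get_openai_access_token()  # returns "sk-placeholder"

# With a tool_config entry (hypothetical shape):
tool_config = [
    {"name": "openai", "configuration": [{"name": "apiKey", "value": "sk-from-config"}]}
]
token = get_openai_access_token(tool_config)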
@@ -328,7 +330,7 @@ async def process_agent_request(row_batch: List[Dict], workflow: Dict, custom_in
          name="AI Assistant",
          instructions=instructions,
          tools=[],
-         model="o3-mini"
+         model="gpt-5.1-chat"
      )
      thread = await client.beta.threads.create()
      parsed_outputs = []
@@ -480,20 +482,21 @@ async def process_transform_json(task_inputs, response_type, task):
      task_id = task.get('id')
      for input_name, input_info in task_inputs.items():
          data_format = input_info.get('format', 'list')
-         transform_function_name = input_info.get('transform_function_name', f"{task_id}_transform_input_json")
+         input_info.get('transform_function_name', f"{task_id}_transform_input_json")
          items = input_info.get('data')
          if data_format == 'list':
              if items and len(items) > 0:
                  # Generate the transformation function
-                 if GLOBAL_GENERATED_PYTHON_CODE.get(transform_function_name, ''):
-                     transformation_function = GLOBAL_GENERATED_PYTHON_CODE[transform_function_name]
-                 else:
-                     function_name = await transform_json_with_type(
-                         items[0],
-                         response_type,
-                         transform_function_name
-                     )
-                     transformation_function = GLOBAL_GENERATED_PYTHON_CODE[function_name]
+                 # if GLOBAL_GENERATED_PYTHON_CODE.get(transform_function_name, ''):
+                 #     transformation_function = GLOBAL_GENERATED_PYTHON_CODE[transform_function_name]
+                 # else:
+                 #     function_name = await transform_json_with_type(
+                 #         items[0],
+                 #         response_type,
+                 #         transform_function_name
+                 #     )
+                 #     transformation_function = GLOBAL_GENERATED_PYTHON_CODE[function_name]
+                 transformation_function = lambda x: x
                  for item in items:
                      input_json = json.loads(item)
                      output_json = transformation_function(input_json)
@@ -952,7 +955,7 @@ async def get_function_call_arguments(input_text: str, function_name: str) -> Tu
 
      # Make the API call
      response = await client.beta.chat.completions.parse(
-         model="o3-mini",
+         model="gpt-5.1-chat",
          messages=[
              {"role": "system", "content": "Extract function arguments in JSON format."},
              {"role": "user", "content": prompt},
dhisana/utils/parse_linkedin_messages_txt.py

@@ -1,7 +1,5 @@
  import uuid
- import hashlib
- from typing import List, Dict, Any
- from pydantic import BaseModel, Field
+ from typing import List
  from datetime import datetime, date
  from dhisana.schemas.sales import MessageItem
 
@@ -86,6 +84,7 @@ def parse_conversation(conversation_text: str) -> List[MessageItem]:
      body_text = "\n".join(body_lines).strip()
      message_item = MessageItem(
          message_id=str(uuid.uuid4()),
+         thread_id=str(uuid.uuid4()),
          sender_name=sender,
          sender_email="",   # LinkedIn message => keep empty
          receiver_name="",  # keep empty by default
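The only behavioural change here is that each parsed message now receives a synthetic `thread_id` UUID alongside its `message_id`. A minimal usage sketch; the export file name is hypothetical and the parser defines the actual text format:

from dhisana.utils.parse_linkedin_messages_txt import parse_conversation

# Hypothetical input: a raw LinkedIn conversation export as plain text.
with open("linkedin_conversation.txt", encoding="utf-8") as fh:
    conversation_text = fh.read()

for msg in parse_conversation(conversation_text):
    # Every MessageItem now carries its own synthetic thread_id.
    print(msg.thread_id, msg.sender_name)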
dhisana/utils/profile.py (new file)

@@ -0,0 +1,37 @@
+ from __future__ import annotations
+ from pyinstrument import Profiler
+ import logging
+ import re
+ from typing import Any, Awaitable, Dict, List, Optional, TypeVar, Union
+ import mdformat
+
+ # --------------------------------------------------------------------------- #
+ # Helper: profile any awaited coroutine and log the timing with its call‑site #
+ # --------------------------------------------------------------------------- #
+ T = TypeVar("T")
+ logger = logging.getLogger(__name__)
+
+
+ async def profile_async_call(awaitable: Awaitable[T], name: str) -> T:
+     """
+     Run *awaitable*, timing it with an ad‑hoc Profiler instance,
+     and log the profiler output.
+
+     Args:
+         awaitable: The coroutine to time.
+         name: Friendly name to show in the log (e.g. function name).
+
+     Returns:
+         The awaited result, typed to whatever the coroutine yields.
+     """
+     profiler = Profiler()  # noqa: F821 (assumes Profiler is already imported)
+     profiler.start()
+     result: T = await awaitable
+     profiler.stop()
+
+     logger.debug(
+         "⏱️ Profiled %s\n%s",
+         name,
+         profiler.output_text(unicode=True, color=True),
+     )
+     return result
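Because `profile_async_call` returns the awaited result unchanged, adopting it only touches the await site. A small usage example; the `fetch_lead` coroutine is hypothetical, and the pyinstrument report lands in the DEBUG log:

import asyncio
import logging

from dhisana.utils.profile import profile_async_call

logging.basicConfig(level=logging.DEBUG)  # profiler output is logged at DEBUG


async def fetch_lead(lead_id: str) -> dict:  # hypothetical coroutine to profile
    await asyncio.sleep(0.1)                 # stand-in for real I/O
    return {"id": lead_id}


async def main() -> None:
    # Same value as awaiting fetch_lead() directly, plus a timing report in the log.
    lead = await profile_async_call(fetch_lead("42"), "fetch_lead")
    print(lead)


asyncio.run(main())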