dhisana 0.0.1.dev263__py3-none-any.whl → 0.0.1.dev265__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -165,7 +165,7 @@ async def get_clean_lead_info_with_llm(lead_info_str: str, tool_config: Optional
  model="gpt-5.1-chat",
  tool_config=tool_config
  )
- if status == "ERROR":
+ if status != "SUCCESS":
  return {}
  return lead_info.model_dump()
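
The comparison is broadened from matching the single "ERROR" string to requiring "SUCCESS", which matters because the structured-output helpers changed further down in this release can also return "FAIL", "API_ERROR" and "CONTEXT_LENGTH_EXCEEDED". A minimal sketch of the (value, status) tuple convention this relies on; the CleanLead model and clean_lead wrapper are illustrative stand-ins, not names from the SDK:

    from pydantic import BaseModel
    from dhisana.utils.generate_structured_output_internal import get_structured_output_internal

    class CleanLead(BaseModel):  # hypothetical stand-in for the SDK's lead schema
        full_name: str = ""
        email: str = ""

    async def clean_lead(lead_info_str: str, tool_config=None) -> dict:
        lead_info, status = await get_structured_output_internal(
            prompt=f"Clean up this lead record: {lead_info_str}",
            response_format=CleanLead,
            model="gpt-5.1-chat",
            tool_config=tool_config,
        )
        # Any non-"SUCCESS" status (FAIL, API_ERROR, CONTEXT_LENGTH_EXCEEDED, ...)
        # now falls through to the empty-dict fallback instead of only "ERROR".
        if status != "SUCCESS":
            return {}
        return lead_info.model_dump()
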
@@ -12,6 +12,11 @@ from dhisana.utils.generate_structured_output_internal import (
  from datetime import datetime
  from pydantic import BaseModel, ConfigDict

+ # ---------------------------------------------------------------------------------------
+ # CONSTANTS
+ # ---------------------------------------------------------------------------------------
+ DEFAULT_OPENAI_MODEL = "gpt-4.1"
+
  # -----------------------------------------------------------------------------
  # Email Copy schema
  # -----------------------------------------------------------------------------

@@ -191,7 +196,7 @@ async def generate_personalized_email_copy(
  prompt=initial_prompt,
  response_format=EmailCopy,
  vector_store_id=vector_store_id,
- model="gpt-5.1-chat",
+ model=DEFAULT_OPENAI_MODEL,
  tool_config=tool_config,
  use_cache=email_context.message_instructions.use_cache if email_context.message_instructions else True
  )

@@ -200,7 +205,7 @@ async def generate_personalized_email_copy(
  initial_response, initial_status = await get_structured_output_internal(
  prompt=initial_prompt,
  response_format=EmailCopy,
- model="gpt-5.1-chat",
+ model=DEFAULT_OPENAI_MODEL,
  tool_config=tool_config,
  use_cache=email_context.message_instructions.use_cache if email_context.message_instructions else True
  )

@@ -16,6 +16,11 @@ from dhisana.utils.generate_structured_output_internal import (
  get_structured_output_internal
  )

+ # ---------------------------------------------------------------------------------------
+ # CONSTANTS
+ # ---------------------------------------------------------------------------------------
+ DEFAULT_TRIAGE_MODEL = "gpt-4.1"
+
  # ---------------------------------------------------------------------------------------
  # MODEL
  # ---------------------------------------------------------------------------------------

@@ -194,7 +199,7 @@ async def get_inbound_email_triage_action(
  triage_only, status = await get_structured_output_with_assistant_and_vector_store(
  prompt=triage_prompt,
  response_format=InboundEmailTriageResponse,
- model="gpt-5.1-chat",
+ model=DEFAULT_TRIAGE_MODEL,
  vector_store_id=cleaned_context.external_known_data.external_openai_vector_store_id,
  tool_config=tool_config,
  use_cache=cleaned_context.message_instructions.use_cache if cleaned_context.message_instructions else True

@@ -203,13 +208,28 @@ async def get_inbound_email_triage_action(
  triage_only, status = await get_structured_output_internal(
  prompt=triage_prompt,
  response_format=InboundEmailTriageResponse,
- model="gpt-5.1-chat",
+ model=DEFAULT_TRIAGE_MODEL,
  tool_config=tool_config,
  use_cache=cleaned_context.message_instructions.use_cache if cleaned_context.message_instructions else True
  )

  if status != "SUCCESS":
- raise Exception("Error in generating triage action.")
+ campaign_id = context.campaign_context.campaign_id if context.campaign_context else 'N/A'
+ if status == "CONTEXT_LENGTH_EXCEEDED":
+ raise Exception(
+ f"Email thread too long for model. Campaign ID: {campaign_id}. "
+ f"Consider truncating thread or switching models."
+ )
+ elif status in ("API_ERROR", "ERROR"):
+ raise Exception(
+ f"Error in generating triage action. Status: {status}. "
+ f"Campaign ID: {campaign_id}. Details: {triage_only}"
+ )
+ else:
+ raise Exception(
+ f"Error in generating triage action. Status: {status}. "
+ f"Campaign ID: {campaign_id}"
+ )
  return triage_only
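
Because the failure path now distinguishes a context-length overflow from other errors and includes the campaign ID, orchestration code can react differently per failure mode. A hedged sketch of one such caller; get_inbound_email_triage_action's exact call signature is assumed from the surrounding hunks, and truncate_thread is a hypothetical helper, not part of the SDK:

    async def triage_with_fallback(context, tool_config=None):
        try:
            return await get_inbound_email_triage_action(context, tool_config=tool_config)
        except Exception as exc:
            if "Email thread too long" in str(exc):        # CONTEXT_LENGTH_EXCEEDED path
                shorter = truncate_thread(context)         # hypothetical: drop older messages
                return await get_inbound_email_triage_action(shorter, tool_config=tool_config)
            raise                                          # API_ERROR / ERROR paths propagate
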
@@ -393,7 +413,7 @@ async def generate_inbound_email_response_copy(
  initial_response, status = await get_structured_output_with_assistant_and_vector_store(
  prompt=prompt,
  response_format=InboundEmailTriageResponse,
- model="gpt-5.1-chat",
+ model=DEFAULT_TRIAGE_MODEL,
  vector_store_id=cleaned_context.external_known_data.external_openai_vector_store_id,
  tool_config=tool_config
  )

@@ -401,12 +421,30 @@ async def generate_inbound_email_response_copy(
  initial_response, status = await get_structured_output_internal(
  prompt=prompt,
  response_format=InboundEmailTriageResponse,
- model="gpt-5.1-chat",
+ model=DEFAULT_TRIAGE_MODEL,
  tool_config=tool_config
  )

  if status != "SUCCESS":
- raise Exception("Error in generating the inbound email triage response.")
+ campaign_id = (
+ campaign_context.campaign_context.campaign_id
+ if campaign_context.campaign_context else 'N/A'
+ )
+ if status == "CONTEXT_LENGTH_EXCEEDED":
+ raise Exception(
+ f"Email thread too long for model. Campaign ID: {campaign_id}. "
+ f"Consider truncating thread or switching models."
+ )
+ elif status in ("API_ERROR", "ERROR"):
+ raise Exception(
+ f"Error in generating inbound email triage response. Status: {status}. "
+ f"Campaign ID: {campaign_id}. Details: {initial_response}"
+ )
+ else:
+ raise Exception(
+ f"Error in generating inbound email triage response. Status: {status}. "
+ f"Campaign ID: {campaign_id}"
+ )

  response_item = MessageItem(
  message_id="", # or generate one if appropriate

@@ -215,6 +215,8 @@ async def generate_leads_code(
  )

  # Return dict + status
+ if status != "SUCCESS":
+ return {"workflow_python_code": ""}, status
  return response.model_dump(), status
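
Returning an empty workflow_python_code alongside the status keeps the return shape stable on failure, so callers can unpack the dict without first checking the status. A hedged caller sketch; the import path, call signature and build_workflow wrapper are assumptions for illustration (the RECORD section below only shows generate_leads_salesnav.py changing):

    from dhisana.utils.generate_leads_salesnav import generate_leads_code  # assumed module path

    async def build_workflow(instructions: str, tool_config=None) -> str:
        result, status = await generate_leads_code(instructions, tool_config=tool_config)
        code = result.get("workflow_python_code", "")
        if status != "SUCCESS" or not code:
            raise RuntimeError(f"Lead-generation code not produced (status={status})")
        return code
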
@@ -21,6 +21,12 @@ from dhisana.utils.generate_structured_output_internal import (
  )
  from dhisana.utils.assistant_tool_tag import assistant_tool

+ # ---------------------------------------------------------------------------------------
+ # CONSTANTS
+ # ---------------------------------------------------------------------------------------
+ DEFAULT_OPENAI_MODEL = "gpt-4.1"
+
+
  # ----------------------------------------------------------------------
  # LinkedIn Connection Message Schema
  # ----------------------------------------------------------------------

@@ -151,7 +157,7 @@ async def generate_personalized_linkedin_copy(
  prompt=prompt,
  response_format=LinkedInConnectMessage,
  vector_store_id=vector_store_id,
- model="gpt-5.1-chat",
+ model=DEFAULT_OPENAI_MODEL,
  tool_config=tool_config,
  use_cache=linkedin_context.message_instructions.use_cache if linkedin_context.message_instructions else True
  )

@@ -160,7 +166,7 @@ async def generate_personalized_linkedin_copy(
  response_data, status = await get_structured_output_internal(
  prompt=prompt,
  response_format=LinkedInConnectMessage,
- model="gpt-5.1-chat",
+ model=DEFAULT_OPENAI_MODEL,
  tool_config=tool_config,
  use_cache=linkedin_context.message_instructions.use_cache if linkedin_context.message_instructions else True
  )

@@ -3,6 +3,7 @@ import hashlib
  import json
  import logging
  import random
+ from typing import Any, Dict, List, Optional, Tuple, Union

  from fastapi import HTTPException
  from pydantic import BaseModel

@@ -17,11 +18,16 @@ from dhisana.utils.fetch_openai_config import (
  _extract_config,
  create_async_openai_client,
  )
- from typing import Any, Dict, List, Optional, Tuple, Union

- from openai import OpenAIError, RateLimitError
- from pydantic import BaseModel

+ # ──────────────────────────────────────────────────────────────────────────────
+ # 1. Helper functions
+ # ──────────────────────────────────────────────────────────────────────────────
+
+ def is_context_length_error(error: Exception) -> bool:
+ """Check if an error is due to context length being exceeded."""
+ error_str = str(error).lower()
+ return "context_length_exceeded" in error_str or "context window" in error_str


  # ──────────────────────────────────────────────────────────────────────────────
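
The new helper classifies failures purely by substring matching on the exception text, so it catches both API payloads that mention "context_length_exceeded" and messages that talk about the model's "context window". A small demonstration with stand-in exceptions (the real call sites pass the OpenAIError / RateLimitError instances caught around the API calls):

    class FakeError(Exception):
        pass

    print(is_context_length_error(FakeError("Error code: 400 - context_length_exceeded")))             # True
    print(is_context_length_error(FakeError("This model's maximum context window is 128000 tokens")))  # True
    print(is_context_length_error(FakeError("rate_limit_exceeded: too many requests")))                # False
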
@@ -144,11 +150,17 @@ async def get_structured_output_internal(
  completion = await _make_request()
  break # success → exit loop
  except (RateLimitError, OpenAIError) as e:
+ # Check for context length exceeded error
+ if is_context_length_error(e):
+ logging.error(f"Context length exceeded: {e}")
+ return f"Context length exceeded: {str(e)}", "CONTEXT_LENGTH_EXCEEDED"
+
  # Detect 429 / rate-limit
+ error_str = str(e).lower()
  is_rl = (
  isinstance(e, RateLimitError)
  or getattr(e, "status_code", None) == 429
- or "rate_limit" in str(e).lower()
+ or "rate_limit" in error_str
  )
  if is_rl and attempt < max_retries:
  attempt += 1

@@ -161,7 +173,7 @@ async def get_structured_output_internal(
  await asyncio.sleep(wait_time)
  continue # retry once
  logging.error(f"OpenAI API error: {e}")
- raise HTTPException(status_code=502, detail="Error communicating with the OpenAI API.")
+ return f"OpenAI API error: {str(e)}", "API_ERROR"

  # ─── handle model output (unchanged) ────────────────────────────────
  if completion and completion.output and len(completion.output) > 0:

@@ -203,12 +215,15 @@ async def get_structured_output_internal(
  else:
  return "No output returned", "FAIL"

+ # Safety fallback: catch any OpenAI errors not caught by inner retry loop
  except OpenAIError as e:
  logging.error(f"OpenAI API error: {e}")
- raise HTTPException(status_code=502, detail="Error communicating with the OpenAI API.")
+ if is_context_length_error(e):
+ return f"Context length exceeded: {str(e)}", "CONTEXT_LENGTH_EXCEEDED"
+ return f"OpenAI API error: {str(e)}", "API_ERROR"
  except Exception as e:
  logging.error(f"Unexpected error: {e}")
- raise HTTPException(status_code=500, detail="Unexpected server error.")
+ return f"Unexpected error: {str(e)}", "ERROR"
@@ -293,11 +308,17 @@ async def get_structured_output_with_mcp(
  return await client_async.responses.create(**kwargs)

  # ─── Retry once for 429s ──────────────────────────────────────────────────
+ completion = None
  for attempt in range(2):
  try:
  completion = await _make_request()
  break
  except (RateLimitError, OpenAIError) as exc:
+ # Check for context length exceeded error
+ if is_context_length_error(exc):
+ logging.error(f"Context length exceeded: {exc}")
+ return f"Context length exceeded: {str(exc)}", "CONTEXT_LENGTH_EXCEEDED"
+
  if attempt == 0 and (
  isinstance(exc, RateLimitError)
  or getattr(exc, "status_code", None) == 429

@@ -308,9 +329,10 @@ async def get_structured_output_with_mcp(
  await asyncio.sleep(sleep_for)
  continue
  logging.error("OpenAI API error: %s", exc)
- raise HTTPException(502, detail="Error communicating with the OpenAI API.") from exc
- else: # pragma: no cover
- raise HTTPException(502, detail="OpenAI request retry loop failed.")
+ return f"OpenAI API error: {str(exc)}", "API_ERROR"
+
+ if not completion:
+ return "OpenAI request retry loop failed", "API_ERROR"

  # ─── Parse the model’s structured output ──────────────────────────────────
  if not (completion and completion.output):
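
Pre-initialising completion and checking it after the loop replaces the old for/else raise: the function can no longer fall out of the retry loop with an unset variable, and a missing response is reported through the same (message, status) convention instead of an HTTP error. A generic sketch of that retry-once shape (the names and the stand-in exception are illustrative, not the SDK's):

    import asyncio
    import random

    async def call_with_one_retry(make_request):
        completion = None
        for attempt in range(2):
            try:
                completion = await make_request()
                break                                         # success: leave the loop
            except RuntimeError as exc:                       # stand-in for RateLimitError / OpenAIError
                if attempt == 0:
                    await asyncio.sleep(1 + random.random())  # brief backoff, then retry once
                    continue
                return f"OpenAI API error: {exc}", "API_ERROR"
        if not completion:                                    # loop ended without a usable response
            return "OpenAI request retry loop failed", "API_ERROR"
        return completion, "SUCCESS"
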
@@ -438,7 +460,7 @@ async def get_structured_output_with_assistant_and_vector_store(
  break

  if not raw_text or not raw_text.strip():
- raise HTTPException(status_code=502, detail="No response from the model.")
+ return "No response from the model", "FAIL"

  try:
  parsed_obj = response_format.parse_raw(raw_text)

@@ -454,9 +476,12 @@ async def get_structured_output_with_assistant_and_vector_store(
  else:
  return "No output returned", "FAIL"

+ # Safety fallback: catch any errors not caught during API call
  except OpenAIError as e:
  logging.error(f"OpenAI API error: {e}")
- raise HTTPException(status_code=502, detail="Error communicating with the OpenAI API.")
+ if is_context_length_error(e):
+ return f"Context length exceeded: {str(e)}", "CONTEXT_LENGTH_EXCEEDED"
+ return f"OpenAI API error: {str(e)}", "API_ERROR"
  except Exception as e:
  logging.error(f"Unexpected error: {e}")
- raise HTTPException(status_code=500, detail="Unexpected server error.")
+ return f"Unexpected error: {str(e)}", "ERROR"

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dhisana
- Version: 0.0.1.dev263
+ Version: 0.0.1.dev265
  Summary: A Python SDK for Dhisana AI Platform
  Home-page: https://github.com/dhisana-ai/dhisana-python-sdk
  Author: Admin

@@ -32,20 +32,20 @@ dhisana/utils/domain_parser.py,sha256=Kw5MPP06wK2azWQzuSiOE-DffOezLqDyF-L9JEBsMS
  dhisana/utils/email_body_utils.py,sha256=rlCVjdBlqNnEiUberJGXGcrYY1GQOkW0-aB6AEpS3L4,2302
  dhisana/utils/email_parse_helpers.py,sha256=LIdm1B1IyGSW50y8EkxOk6YRjvxO2SJTgTKPLxYls_o,4613
  dhisana/utils/email_provider.py,sha256=spjbNdnaVfCZEUw62EEHKijuXjI7vTVNqsftxJ15Erw,14352
- dhisana/utils/enrich_lead_information.py,sha256=F6nn9W-yUi4buJZ3EI21IncvaJExKkYu1WHtaKkFFWs,39409
+ dhisana/utils/enrich_lead_information.py,sha256=O0fV-8MlXFT_z5aXvmvXVT76AISN94GpvAOlq3q_Phw,39411
  dhisana/utils/extract_email_content_for_llm.py,sha256=SQmMZ3YJtm3ZI44XiWEVAItcAwrsSSy1QzDne7LTu_Q,3713
  dhisana/utils/fetch_openai_config.py,sha256=LjWdFuUeTNeAW106pb7DLXZNElos2PlmXRe6bHZJ2hw,5159
  dhisana/utils/field_validators.py,sha256=BZgNCpBG264aRqNUu_J67c6zfr15zlAaIw2XRy8J7DY,11809
  dhisana/utils/g2_tools.py,sha256=a4vmBYCBvLae5CdpOhMN1oNlvO8v9J1B5Sd8T5PzuU8,3346
  dhisana/utils/generate_content.py,sha256=kkf-aPuA32BNgwk_j5N6unYHOZpO7zIfO6zP95XM9fA,2298
  dhisana/utils/generate_custom_message.py,sha256=tQsryytoYKP5uF3bRENeZks1LvOMFCP6L1487P_r_hk,12072
- dhisana/utils/generate_email.py,sha256=FtzrqGwv0n6enHSF12RnviCC013zRV4Bjaq6jx_fldQ,12979
- dhisana/utils/generate_email_response.py,sha256=nGjIZp3PMWF2lMWJ7HIEAuF-mlyQGbEXJcyQGExB-KI,19504
+ dhisana/utils/generate_email.py,sha256=Qufol-qgOk8loNulZ6hMzIRr45Ml7JwJpLO5OMabbH8,13217
+ dhisana/utils/generate_email_response.py,sha256=Xk6t2hW_QbumvXf5uUdsD-Lkq8hfzRD9QWioYcdWM1k,21194
  dhisana/utils/generate_flow.py,sha256=QMn6bWo0nH0fBvy2Ebub1XfH5udnVAqsPsbIqCtQPXU,4728
- dhisana/utils/generate_leads_salesnav.py,sha256=AONP1KXDJdg95JQBmKx5PQXUD2BHctc-MZOMuRfuK9U,12156
- dhisana/utils/generate_linkedin_connect_message.py,sha256=Y71eVZ5EwE7e2S1qrJsLsYzecDC-9Fymj6RYLyVB0ys,9969
+ dhisana/utils/generate_leads_salesnav.py,sha256=FG7q6GSm9IywZ9TgQnn5_N3QNfiI-Qk2gaO_3GS99nY,12236
+ dhisana/utils/generate_linkedin_connect_message.py,sha256=QxsxDiT-3eQOqAAbW13d0HGJXV36WYPvC-7Zsw_2VTI,10208
  dhisana/utils/generate_linkedin_response_message.py,sha256=mWoSs5p2JSTIoFZFGm86x1kgs67J7dHPvGKZPzcdGdU,14569
- dhisana/utils/generate_structured_output_internal.py,sha256=DmZ5QzW-79Jo3JL5nDCZQ-Fjl8Nz7FHK6S0rZxXbKyg,20705
+ dhisana/utils/generate_structured_output_internal.py,sha256=k6w5zaaMigp7cUgFALr-TRBsoEQTlzyGfH4R8HIIfGU,22116
  dhisana/utils/google_custom_search.py,sha256=5rQ4uAF-hjFpd9ooJkd6CjRvSmhZHhqM0jfHItsbpzk,10071
  dhisana/utils/google_oauth_tools.py,sha256=ReG5lCpXL3_e_s0yn6ai4U7B4-feOWHJVtbv_c0g0rE,28525
  dhisana/utils/google_workspace_tools.py,sha256=fuV0UcvAqF9drLzj7-p6D5zh7d5jMXl1jNJTICk4XOo,50224

@@ -95,8 +95,8 @@ dhisana/workflow/agent.py,sha256=esv7_i_XuMkV2j1nz_UlsHov_m6X5WZZiZm_tG4OBHU,565
  dhisana/workflow/flow.py,sha256=xWE3qQbM7j2B3FH8XnY3zOL_QXX4LbTW4ArndnEYJE0,1638
  dhisana/workflow/task.py,sha256=HlWz9mtrwLYByoSnePOemBUBrMEcj7KbgNjEE1oF5wo,1830
  dhisana/workflow/test.py,sha256=E7lRnXK0PguTNzyasHytLzTJdkqIPxG5_4qk4hMEeKc,3399
- dhisana-0.0.1.dev263.dist-info/METADATA,sha256=bXLyYLDa5vvAlz7vpCbBYdBIv5-1s8XLbO-HGAnZFRA,1190
- dhisana-0.0.1.dev263.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dhisana-0.0.1.dev263.dist-info/entry_points.txt,sha256=jujxteZmNI9EkEaK-pOCoWuBujU8TCevdkfl9ZcKHek,49
- dhisana-0.0.1.dev263.dist-info/top_level.txt,sha256=NETTHt6YifG_P7XtRHbQiXZlgSFk9Qh9aR-ng1XTf4s,8
- dhisana-0.0.1.dev263.dist-info/RECORD,,
+ dhisana-0.0.1.dev265.dist-info/METADATA,sha256=S6o7HBV1UGxYP3ltCxcwGEYDhpQ5VsZ_sEUXvf_cIcY,1190
+ dhisana-0.0.1.dev265.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dhisana-0.0.1.dev265.dist-info/entry_points.txt,sha256=jujxteZmNI9EkEaK-pOCoWuBujU8TCevdkfl9ZcKHek,49
+ dhisana-0.0.1.dev265.dist-info/top_level.txt,sha256=NETTHt6YifG_P7XtRHbQiXZlgSFk9Qh9aR-ng1XTf4s,8
+ dhisana-0.0.1.dev265.dist-info/RECORD,,