dhisana-0.0.1.dev235-py3-none-any.whl → dhisana-0.0.1.dev237-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dhisana/schemas/sales.py CHANGED
@@ -801,6 +801,16 @@ class LeadsQueryFilters(BaseModel):
         default=None,
         description="List of industry tag IDs, e.g. ['5567cd4773696439b10b0000']. Maps to organization_industry_tag_ids."
     )
+
+    q_organization_keyword_tags: Optional[List[str]] = Field(
+        default=None,
+        description="Organization Keyword tags to search by"
+    )
+
+    q_not_organization_keyword_tags: Optional[List[str]] = Field(
+        default=None,
+        description="Organization Keyword tags to search by"
+    )
 
     q_organization_search_list_id: Optional[str] = Field(
         default=None,
@@ -865,6 +875,16 @@ class CompanyQueryFilters(BaseModel):
         default=None,
         description="List of industry tag IDs, e.g. ['5567cd4773696439b10b0000']."
     )
+
+    q_organization_keyword_tags: Optional[List[str]] = Field(
+        default=None,
+        description="Organization Keyword tags to search by"
+    )
+
+    q_not_organization_keyword_tags: Optional[List[str]] = Field(
+        default=None,
+        description="Organization Keyword tags to search by"
+    )
 
     # Revenue filters
     revenue_range_min: Optional[int] = Field(
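Note that both new fields ship with the same description string ("Organization Keyword tags to search by"), apparently copy-pasted; judging by the `q_not_` prefix and the Apollo payload wiring below, the second field is an exclusion filter. A minimal usage sketch (not part of the diff; assumes the models' other fields are optional and that the package uses Pydantic v2 for `model_dump`):

```python
# Hypothetical usage of the new filter fields; q_not_organization_keyword_tags
# is treated as an exclusion list per the Apollo payload wiring below.
from dhisana.schemas.sales import CompanyQueryFilters

filters = CompanyQueryFilters(
    q_organization_keyword_tags=["fintech", "payments"],  # tags to match
    q_not_organization_keyword_tags=["staffing"],         # tags to exclude
)
print(filters.model_dump(exclude_none=True))  # Pydantic v2 assumed
```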
dhisana/utils/apollo_tools.py CHANGED
@@ -847,6 +847,11 @@ async def search_leads_with_apollo_page(
         dynamic_payload["sort_by_field"] = query.sort_by_field
     if query.sort_ascending is not None:
         dynamic_payload["sort_ascending"] = query.sort_ascending
+    if query.q_organization_keyword_tags:
+        dynamic_payload["q_organization_keyword_tags"] = query.q_organization_keyword_tags
+
+    if query.q_not_organization_keyword_tags:
+        dynamic_payload["q_not_organization_keyword_tags"] = query.q_not_organization_keyword_tags
 
     page_payload = dict(dynamic_payload)
     page_payload["page"] = page
@@ -1474,7 +1479,13 @@ async def search_companies_with_apollo_page(
         keyword_tags = [tag.strip() for tag in query.q_keywords.split(",") if tag.strip()]
     else:
         keyword_tags = query.q_keywords
+
+    if query.q_organization_keyword_tags:
         dynamic_payload["q_organization_keyword_tags"] = keyword_tags
+
+    if query.q_not_organization_keyword_tags:
+        dynamic_payload["q_not_organization_keyword_tags"] = query.q_not_organization_keyword_tags
+
     if query.q_organization_domains:
         dynamic_payload["q_organization_domains"] = query.q_organization_domains
     if query.revenue_range_min is not None:
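In the companies hunk, note that the unchanged context line still assigns `keyword_tags` (derived from `q_keywords`) rather than `query.q_organization_keyword_tags` itself, so the new guard only gates the pre-existing behaviour. For orientation, an illustrative payload the two hunks produce (keys from the diff, values invented):

```python
# Illustrative Apollo search payload after the hunks above; the keys are
# those the diff forwards, the values are invented for the example.
dynamic_payload = {
    "q_organization_keyword_tags": ["fintech", "payments"],
    "q_not_organization_keyword_tags": ["staffing"],
}
page_payload = dict(dynamic_payload)  # per-page copy, as in the leads hunk
page_payload["page"] = 1
```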
dhisana/utils/enrich_lead_information.py CHANGED
@@ -765,16 +765,32 @@ async def enrich_organization_info_from_company_url(
     organization_linkedin_url: str,
     use_strict_check: bool = True,
     tool_config: Optional[List[Dict[str, Any]]] = None,
+    categories: Optional[bool] = None,
+    funding_data: Optional[bool] = None,
+    exit_data: Optional[bool] = None,
+    acquisitions: Optional[bool] = None,
+    extra: Optional[bool] = None,
+    use_cache: Optional[str] = "if-present",
+    fallback_to_cache: Optional[str] = "on-error",
 ) -> Dict[str, Any]:
     """
     Given an organization LinkedIn URL, attempt to enrich its data (e.g. name, website)
-    via ProxyCurl. If data is found, set domain, then return the dict. Otherwise, return {}.
+    via ProxyCurl. Additional Proxycurl Company API boolean flags (categories, funding_data, etc.)
+    can be supplied to control the returned payload (True -> "include"). If data is found,
+    set domain, then return the dict. Otherwise, return {}.
     """
 
     # Call ProxyCurl to enrich
     company_data = await enrich_organization_info_from_proxycurl(
         organization_linkedin_url=organization_linkedin_url,
-        tool_config=tool_config
+        tool_config=tool_config,
+        categories=categories,
+        funding_data=funding_data,
+        exit_data=exit_data,
+        acquisitions=acquisitions,
+        extra=extra,
+        use_cache=use_cache,
+        fallback_to_cache=fallback_to_cache,
     )
 
     # If ProxyCurl returned any data, set domain, then return
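A call-site sketch for the widened wrapper (module path inferred from the RECORD section below; values hypothetical, and an async runtime plus configured ProxyCurl credentials are assumed):

```python
# Hypothetical call to the wrapper with the new pass-through flags;
# True/False map to "include"/"exclude", None omits the parameter.
import asyncio
from dhisana.utils.enrich_lead_information import enrich_organization_info_from_company_url

async def main():
    org = await enrich_organization_info_from_company_url(
        organization_linkedin_url="https://www.linkedin.com/company/acme/",
        funding_data=True,       # forwarded as funding_data="include"
        acquisitions=False,      # forwarded as acquisitions="exclude"
    )
    if org:
        print(org.get("name"), org.get("website"))

asyncio.run(main())
```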
dhisana/utils/google_oauth_tools.py CHANGED
@@ -194,46 +194,62 @@ async def list_emails_in_time_range_google_oauth_async(
         q_parts.extend([f"label:{lbl}" for lbl in context.labels])
     query = " ".join(q_parts)
 
-    params = {"q": query}
+    params = {"q": query, "maxResults": 100}
 
     items: List[MessageItem] = []
+    max_fetch = 500  # defensive cap to avoid excessive paging
     async with httpx.AsyncClient(timeout=30) as client:
         try:
-            list_resp = await client.get(base_url, headers=headers, params=params)
-            list_resp.raise_for_status()
-            list_data = list_resp.json() or {}
-            for m in list_data.get("messages", []) or []:
-                mid = m.get("id")
-                tid = m.get("threadId")
-                if not mid:
-                    continue
-                get_url = f"{base_url}/{mid}"
-                get_resp = await client.get(get_url, headers=headers)
-                get_resp.raise_for_status()
-                mdata = get_resp.json() or {}
-
-                headers_list = (mdata.get("payload") or {}).get("headers", [])
-                from_header = find_header(headers_list, "From") or ""
-                subject_header = find_header(headers_list, "Subject") or ""
-                date_header = find_header(headers_list, "Date") or ""
-
-                iso_dt = convert_date_to_iso(date_header)
-                s_name, s_email = parse_single_address(from_header)
-                r_name, r_email = find_all_recipients_in_headers(headers_list)
-
-                items.append(
-                    MessageItem(
-                        message_id=mdata.get("id", ""),
-                        thread_id=tid or "",
-                        sender_name=s_name,
-                        sender_email=s_email,
-                        receiver_name=r_name,
-                        receiver_email=r_email,
-                        iso_datetime=iso_dt,
-                        subject=subject_header,
-                        body=extract_email_body_in_plain_text(mdata),
+            next_page_token = None
+            while True:
+                page_params = dict(params)
+                if next_page_token:
+                    page_params["pageToken"] = next_page_token
+
+                list_resp = await client.get(base_url, headers=headers, params=page_params)
+                list_resp.raise_for_status()
+                list_data = list_resp.json() or {}
+                for m in list_data.get("messages", []) or []:
+                    if len(items) >= max_fetch:
+                        break
+                    mid = m.get("id")
+                    tid = m.get("threadId")
+                    if not mid:
+                        continue
+                    get_url = f"{base_url}/{mid}"
+                    get_resp = await client.get(get_url, headers=headers)
+                    get_resp.raise_for_status()
+                    mdata = get_resp.json() or {}
+
+                    headers_list = (mdata.get("payload") or {}).get("headers", [])
+                    from_header = find_header(headers_list, "From") or ""
+                    subject_header = find_header(headers_list, "Subject") or ""
+                    date_header = find_header(headers_list, "Date") or ""
+
+                    iso_dt = convert_date_to_iso(date_header)
+                    s_name, s_email = parse_single_address(from_header)
+                    r_name, r_email = find_all_recipients_in_headers(headers_list)
+
+                    items.append(
+                        MessageItem(
+                            message_id=mdata.get("id", ""),
+                            thread_id=tid or "",
+                            sender_name=s_name,
+                            sender_email=s_email,
+                            receiver_name=r_name,
+                            receiver_email=r_email,
+                            iso_datetime=iso_dt,
+                            subject=subject_header,
+                            body=extract_email_body_in_plain_text(mdata),
+                        )
                     )
-                )
+
+                if len(items) >= max_fetch:
+                    break
+
+                next_page_token = list_data.get("nextPageToken")
+                if not next_page_token:
+                    break
         except httpx.HTTPStatusError as exc:
             _rethrow_with_google_message(exc, "Gmail List OAuth")
 
dhisana/utils/google_workspace_tools.py CHANGED
@@ -241,47 +241,64 @@ async def list_emails_in_time_range_async(
     query += f' {label_query}'
 
     headers = {'Authorization': f'Bearer {access_token}'}
-    params = {'q': query}
+    params = {'q': query, 'maxResults': 100}
 
     message_items: List[MessageItem] = []
+    max_fetch = 500  # defensive cap
     async with httpx.AsyncClient() as client:
-        response = await client.get(gmail_api_url, headers=headers, params=params)
-        response.raise_for_status()
-        messages = response.json().get('messages', [])
-
-        for msg in messages:
-            message_id = msg['id']
-            thread_id = msg['threadId']
-            message_url = f'{gmail_api_url}/{message_id}'
-            message_response = await client.get(message_url, headers=headers)
-            message_response.raise_for_status()
-            message_data = message_response.json()
-
-            headers_list = message_data['payload']['headers']
-            from_header = find_header(headers_list, 'From') or ""
-            subject_header = find_header(headers_list, 'Subject') or ""
-            date_header = find_header(headers_list, 'Date') or ""
-
-            iso_datetime_str = convert_date_to_iso(date_header)
+        next_page_token = None
+        while True:
+            page_params = dict(params)
+            if next_page_token:
+                page_params["pageToken"] = next_page_token
 
-            # Parse the "From" into (sender_name, sender_email)
-            s_name, s_email = parse_single_address(from_header)
+            response = await client.get(gmail_api_url, headers=headers, params=page_params)
+            response.raise_for_status()
+            resp_json = response.json() or {}
+            messages = resp_json.get('messages', [])
+
+            for msg in messages:
+                if len(message_items) >= max_fetch:
+                    break
+                message_id = msg['id']
+                thread_id = msg.get('threadId', "")
+                message_url = f'{gmail_api_url}/{message_id}'
+                message_response = await client.get(message_url, headers=headers)
+                message_response.raise_for_status()
+                message_data = message_response.json()
+
+                headers_list = message_data['payload']['headers']
+                from_header = find_header(headers_list, 'From') or ""
+                subject_header = find_header(headers_list, 'Subject') or ""
+                date_header = find_header(headers_list, 'Date') or ""
+
+                iso_datetime_str = convert_date_to_iso(date_header)
+
+                # Parse the "From" into (sender_name, sender_email)
+                s_name, s_email = parse_single_address(from_header)
+
+                # Parse the recipients
+                r_name, r_email = find_all_recipients_in_headers(headers_list)
+
+                msg_item = MessageItem(
+                    message_id=message_data['id'],
+                    thread_id=thread_id,
+                    sender_name=s_name,
+                    sender_email=s_email,
+                    receiver_name=r_name,
+                    receiver_email=r_email,
+                    iso_datetime=iso_datetime_str,
+                    subject=subject_header,
+                    body=extract_email_body_in_plain_text(message_data)
+                )
+                message_items.append(msg_item)
 
-            # Parse the recipients
-            r_name, r_email = find_all_recipients_in_headers(headers_list)
+            if len(message_items) >= max_fetch:
+                break
 
-            msg_item = MessageItem(
-                message_id=message_data['id'],
-                thread_id=thread_id,
-                sender_name=s_name,
-                sender_email=s_email,
-                receiver_name=r_name,
-                receiver_email=r_email,
-                iso_datetime=iso_datetime_str,
-                subject=subject_header,
-                body=extract_email_body_in_plain_text(message_data)
-            )
-            message_items.append(msg_item)
+            next_page_token = resp_json.get("nextPageToken")
+            if not next_page_token:
+                break
 
     return message_items
 
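Both Gmail listing functions above now repeat the same `pageToken` loop with a `maxResults`/`max_fetch` cap. As a design note, the pattern could be factored into a single async generator; a sketch under that assumption (helper name and signature are invented, not part of the package):

```python
# Hypothetical refactor: one async generator expressing the pageToken
# loop that both functions above now duplicate.
from typing import Any, AsyncIterator, Dict

import httpx

async def iter_gmail_message_ids(
    client: httpx.AsyncClient,
    base_url: str,
    headers: Dict[str, str],
    params: Dict[str, Any],
    max_fetch: int = 500,  # defensive cap, as in both hunks above
) -> AsyncIterator[str]:
    """Yield Gmail message IDs page by page until exhausted or capped."""
    fetched = 0
    next_page_token = None
    while True:
        page_params = dict(params)
        if next_page_token:
            page_params["pageToken"] = next_page_token
        resp = await client.get(base_url, headers=headers, params=page_params)
        resp.raise_for_status()
        data = resp.json() or {}
        for m in data.get("messages", []) or []:
            if fetched >= max_fetch:
                return
            if m.get("id"):
                fetched += 1
                yield m["id"]
        next_page_token = data.get("nextPageToken")
        if not next_page_token:
            return
```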
dhisana/utils/proxy_curl_tools.py CHANGED
@@ -103,9 +103,7 @@ async def enrich_person_info_from_proxycurl(
     params['url'] = linkedin_url
     if email:
         params['email'] = email
-    else:
-        # Request Proxycurl to include personal emails when no email is provided
-        params['personal_email'] = 'include'
+
     if phone:
         params['phone'] = phone
 
@@ -273,6 +271,46 @@ def transform_company_data(data: dict) -> dict:
     return transformed
 
 
+def _build_company_profile_params(
+    company_url: str,
+    profile_flags: Dict[str, Optional[str]],
+) -> Dict[str, str]:
+    """
+    Build request params for the Enrichlayer company profile endpoint,
+    ensuring we only forward flags that were explicitly provided.
+    """
+    params: Dict[str, str] = {'url': company_url}
+    for key, value in profile_flags.items():
+        if value is not None:
+            params[key] = value
+    return params
+
+
+def _build_company_cache_key(identifier: str, profile_flags: Dict[str, Optional[str]]) -> str:
+    """
+    Builds a cache key that is unique for the combination of identifier
+    (LinkedIn URL or domain) and the optional enrichment flags.
+    """
+    suffix_bits = [
+        f"{key}={value}"
+        for key, value in sorted(profile_flags.items())
+        if value is not None
+    ]
+    if suffix_bits:
+        return f"{identifier}|{'&'.join(suffix_bits)}"
+    return identifier
+
+
+def _bool_to_include_exclude(value: Optional[bool]) -> Optional[str]:
+    """
+    Convert a boolean flag into the string literals expected by Proxycurl.
+    True -> "include", False -> "exclude", None -> None (omit parameter).
+    """
+    if value is None:
+        return None
+    return "include" if value else "exclude"
+
+
 @backoff.on_exception(
     backoff.expo,
     aiohttp.ClientResponseError,
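Read together, the three helpers compose like this (illustrative checks derived from the definitions above; the private helpers are imported here purely for demonstration):

```python
# Illustrative checks of the helper semantics shown in the hunk above.
from dhisana.utils.proxy_curl_tools import (
    _bool_to_include_exclude,
    _build_company_cache_key,
    _build_company_profile_params,
)

flags = {
    "categories": _bool_to_include_exclude(True),    # -> "include"
    "funding_data": _bool_to_include_exclude(None),  # -> None, omitted below
    "use_cache": "if-present",
}
assert _bool_to_include_exclude(False) == "exclude"
assert _build_company_profile_params("https://www.linkedin.com/company/acme/", flags) == {
    "url": "https://www.linkedin.com/company/acme/",
    "categories": "include",
    "use_cache": "if-present",
}
# Cache keys embed only the flags actually set, sorted by name:
assert _build_company_cache_key("acme.com", flags) == (
    "acme.com|categories=include&use_cache=if-present"
)
```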
@@ -283,10 +321,27 @@ def transform_company_data(data: dict) -> dict:
 async def enrich_organization_info_from_proxycurl(
     organization_domain: Optional[str] = None,
     organization_linkedin_url: Optional[str] = None,
-    tool_config: Optional[List[Dict]] = None
+    tool_config: Optional[List[Dict]] = None,
+    categories: Optional[bool] = None,
+    funding_data: Optional[bool] = None,
+    exit_data: Optional[bool] = None,
+    acquisitions: Optional[bool] = None,
+    extra: Optional[bool] = None,
+    use_cache: Optional[str] = "if-present",
+    fallback_to_cache: Optional[str] = "on-error",
 ) -> Dict:
     """
     Fetch an organization's details from Proxycurl using either the organization domain or LinkedIn URL.
+    Additional keyword parameters map directly to the Enrichlayer Company Profile endpoint.
+
+    Args:
+        organization_domain: Organization's domain name to resolve via Proxycurl.
+        organization_linkedin_url: LinkedIn company profile URL.
+        tool_config: Optional tool configuration metadata for credential lookup.
+        categories/funding_data/exit_data/acquisitions/extra: Set True to request
+            "include", False for "exclude", or None to omit.
+        use_cache: Controls Proxycurl caching behaviour (e.g. "if-present").
+        fallback_to_cache: Controls Proxycurl cache fallback behaviour (e.g. "on-error").
 
     Returns:
         dict: Transformed JSON response containing organization information,
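The same flags work when only a domain is known; a sketch (async context and Proxycurl credentials assumed) showing the None-flag omission described in the docstring:

```python
# Hypothetical call by domain; categories/extra are left as None, so per
# _bool_to_include_exclude they are omitted from the request params.
import asyncio
from dhisana.utils.proxy_curl_tools import enrich_organization_info_from_proxycurl

async def main():
    org = await enrich_organization_info_from_proxycurl(
        organization_domain="acme.com",
        exit_data=True,                # sent as exit_data="include"
        fallback_to_cache="on-error",  # default, shown for clarity
    )
    print(bool(org))  # {} when Proxycurl finds nothing

asyncio.run(main())
```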
@@ -308,6 +363,16 @@ async def enrich_organization_info_from_proxycurl(
         logger.warning("No organization domain or LinkedIn URL provided.")
         return {}
 
+    profile_flags: Dict[str, Optional[str]] = {
+        "categories": _bool_to_include_exclude(categories),
+        "funding_data": _bool_to_include_exclude(funding_data),
+        "exit_data": _bool_to_include_exclude(exit_data),
+        "acquisitions": _bool_to_include_exclude(acquisitions),
+        "extra": _bool_to_include_exclude(extra),
+        "use_cache": use_cache,
+        "fallback_to_cache": fallback_to_cache,
+    }
+
     # If LinkedIn URL is provided, standardize it and fetch data
     if organization_linkedin_url:
         logger.debug(f"Organization LinkedIn URL provided: {organization_linkedin_url}")
@@ -330,8 +395,9 @@ async def enrich_organization_info_from_proxycurl(
         if standardized_url and not standardized_url.endswith('/'):
             standardized_url += '/'
 
+        cache_key = _build_company_cache_key(standardized_url, profile_flags)
         # Check cache for standardized LinkedIn URL
-        cached_response = retrieve_output("enrich_organization_info_from_proxycurl", standardized_url)
+        cached_response = retrieve_output("enrich_organization_info_from_proxycurl", cache_key)
         if cached_response is not None:
             logger.info(f"Cache hit for organization LinkedIn URL: {standardized_url}")
             cached_response = transform_company_data(cached_response)
@@ -339,11 +405,7 @@ async def enrich_organization_info_from_proxycurl(
 
         # Fetch details using standardized LinkedIn URL
         url = 'https://enrichlayer.com/api/v2/company'
-        params = {
-            'url': standardized_url,
-            'use_cache': 'if-present',
-            'fallback_to_cache': 'on-error',
-        }
+        params = _build_company_profile_params(standardized_url, profile_flags)
         logger.debug(f"Making request to Proxycurl with params: {params}")
 
         async with aiohttp.ClientSession() as session:
@@ -353,7 +415,7 @@ async def enrich_organization_info_from_proxycurl(
                 if response.status == 200:
                     result = await response.json()
                     transformed_result = transform_company_data(result)
-                    cache_output("enrich_organization_info_from_proxycurl", standardized_url, transformed_result)
+                    cache_output("enrich_organization_info_from_proxycurl", cache_key, transformed_result)
                     logger.info("Successfully retrieved and transformed organization info from Proxycurl by LinkedIn URL.")
                     return transformed_result
                 elif response.status == 429:
@@ -367,7 +429,7 @@ async def enrich_organization_info_from_proxycurl(
                         f"Proxycurl organization profile not found for LinkedIn URL {standardized_url}: {error_text}"
                     )
                     cache_output(
-                        "enrich_organization_info_from_proxycurl", standardized_url, {}
+                        "enrich_organization_info_from_proxycurl", cache_key, {}
                     )
                     return {}
                 else:
@@ -383,7 +445,8 @@ async def enrich_organization_info_from_proxycurl(
     # If organization domain is provided, resolve domain to LinkedIn URL and fetch data
     if organization_domain:
         logger.debug(f"Organization domain provided: {organization_domain}")
-        cached_response = retrieve_output("enrich_organization_info_from_proxycurl", organization_domain)
+        domain_cache_key = _build_company_cache_key(organization_domain, profile_flags)
+        cached_response = retrieve_output("enrich_organization_info_from_proxycurl", domain_cache_key)
         if cached_response is not None:
             logger.info(f"Cache hit for organization domain: {organization_domain}")
             return cached_response
@@ -414,12 +477,13 @@ async def enrich_organization_info_from_proxycurl(
 
             profile_url = 'https://enrichlayer.com/api/v2/company'
             try:
-                async with session.get(profile_url, headers=HEADERS, params={'url': standardized_url}) as profile_response:
+                profile_params = _build_company_profile_params(standardized_url, profile_flags)
+                async with session.get(profile_url, headers=HEADERS, params=profile_params) as profile_response:
                     logger.debug(f"Received profile response status: {profile_response.status}")
                     if profile_response.status == 200:
                         result = await profile_response.json()
                         transformed_result = transform_company_data(result)
-                        cache_output("enrich_organization_info_from_proxycurl", organization_domain, transformed_result)
+                        cache_output("enrich_organization_info_from_proxycurl", domain_cache_key, transformed_result)
                         logger.info("Successfully retrieved and transformed organization info from Proxycurl by domain.")
                         return transformed_result
                     elif profile_response.status == 429:
@@ -445,7 +509,7 @@ async def enrich_organization_info_from_proxycurl(
                 elif response.status == 404:
                     msg = "Item not found"
                     logger.warning(msg)
-                    cache_output("enrich_organization_info_from_proxycurl", organization_domain, {})
+                    cache_output("enrich_organization_info_from_proxycurl", domain_cache_key, {})
                     return {}
                 else:
                     error_text = await response.text()
dhisana-0.0.1.dev{235 → 237}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dhisana
-Version: 0.0.1.dev235
+Version: 0.0.1.dev237
 Summary: A Python SDK for Dhisana AI Platform
 Home-page: https://github.com/dhisana-ai/dhisana-python-sdk
 Author: Admin
dhisana-0.0.1.dev{235 → 237}.dist-info/RECORD RENAMED
@@ -6,13 +6,13 @@ dhisana/cli/models.py,sha256=IzUFZW_X32mL3fpM1_j4q8AF7v5nrxJcxBoqvG-TTgA,706
 dhisana/cli/predictions.py,sha256=VYgoLK1Ksv6MFImoYZqjQJkds7e5Hso65dHwbxTNNzE,646
 dhisana/schemas/__init__.py,sha256=jv2YF__bseklT3OWEzlqJ5qE24c4aWd5F4r0TTjOrWQ,65
 dhisana/schemas/common.py,sha256=rt1ho4nzVhTwTQ_1Kx5TI-xZSbnyDpYN0fQ8Fgf8z6k,9332
-dhisana/schemas/sales.py,sha256=k-ZTB-DaQbjvI882L6443H4gspWBFY-VrY2_1xlLn74,33587
+dhisana/schemas/sales.py,sha256=XGPdTuHoNjvkq-xQtEh5Qfg5MQQoaJdDmu46aObBCV0,34219
 dhisana/ui/__init__.py,sha256=jv2YF__bseklT3OWEzlqJ5qE24c4aWd5F4r0TTjOrWQ,65
 dhisana/ui/components.py,sha256=4NXrAyl9tx2wWwoVYyABO-EOGnreGMvql1AkXWajIIo,14316
 dhisana/utils/__init__.py,sha256=jv2YF__bseklT3OWEzlqJ5qE24c4aWd5F4r0TTjOrWQ,65
 dhisana/utils/add_mapping.py,sha256=oq_QNqag86DhgdwINBRRXNx7SOb8Q9M-V0QLP6pTzr8,13837
 dhisana/utils/agent_tools.py,sha256=pzBFvfhU4wfSB4zv1eiRzjmnteJnfhC5V32r_v1m38Y,2321
-dhisana/utils/apollo_tools.py,sha256=DUg9HQBHwD9ZjqbdLP2QPWxbPSAN7aN2d_Brq4svSg0,64297
+dhisana/utils/apollo_tools.py,sha256=kn0e46n8xGbpOz-3qLcAw9HLgGHv8DMfIPqLNukfdoU,64830
 dhisana/utils/assistant_tool_tag.py,sha256=rYRl8ubLI7fUUIjg30XTefHBkFgRqNEVC12lF6U6Z-8,119
 dhisana/utils/built_with_api_tools.py,sha256=TFNGhnPb2vFdveVCpjiCvE1WKe_eK95UPpR0Ha5NgMQ,10260
 dhisana/utils/cache_output_tools.py,sha256=sSAruvUZn-WAJQ0lB9T1QjSmkm-_14AuxC9xKmcCQ0k,3428
@@ -32,7 +32,7 @@ dhisana/utils/domain_parser.py,sha256=Kw5MPP06wK2azWQzuSiOE-DffOezLqDyF-L9JEBsMS
 dhisana/utils/email_body_utils.py,sha256=rlCVjdBlqNnEiUberJGXGcrYY1GQOkW0-aB6AEpS3L4,2302
 dhisana/utils/email_parse_helpers.py,sha256=LIdm1B1IyGSW50y8EkxOk6YRjvxO2SJTgTKPLxYls_o,4613
 dhisana/utils/email_provider.py,sha256=spjbNdnaVfCZEUw62EEHKijuXjI7vTVNqsftxJ15Erw,14352
-dhisana/utils/enrich_lead_information.py,sha256=xJXxhLqQcR2wukM9MDBJ80BAoE-SH0cwTpDWOEHx-gs,38730
+dhisana/utils/enrich_lead_information.py,sha256=Z4knDUSV0DlaX-mnmTPCR2CLjN32qUPPc0AW2N0Dr74,39402
 dhisana/utils/extract_email_content_for_llm.py,sha256=SQmMZ3YJtm3ZI44XiWEVAItcAwrsSSy1QzDne7LTu_Q,3713
 dhisana/utils/fetch_openai_config.py,sha256=LjWdFuUeTNeAW106pb7DLXZNElos2PlmXRe6bHZJ2hw,5159
 dhisana/utils/field_validators.py,sha256=BZgNCpBG264aRqNUu_J67c6zfr15zlAaIw2XRy8J7DY,11809
@@ -46,8 +46,8 @@ dhisana/utils/generate_linkedin_connect_message.py,sha256=WZThEun-DMuAOqlzMI--hG
 dhisana/utils/generate_linkedin_response_message.py,sha256=-jg-u5Ipf4-cn9q0yjEHsEBe1eJhYLCLrjZDtOXnCyQ,14464
 dhisana/utils/generate_structured_output_internal.py,sha256=DmZ5QzW-79Jo3JL5nDCZQ-Fjl8Nz7FHK6S0rZxXbKyg,20705
 dhisana/utils/google_custom_search.py,sha256=5rQ4uAF-hjFpd9ooJkd6CjRvSmhZHhqM0jfHItsbpzk,10071
-dhisana/utils/google_oauth_tools.py,sha256=sxWZLHMfFSF4Wyu-FxQKQiDKDHe0Kl_rRk7D6ejBLYg,27609
-dhisana/utils/google_workspace_tools.py,sha256=pvO1rtDpknHAO9bmBKJ9Zhvrv65Og3U2x20W1ytql08,48185
+dhisana/utils/google_oauth_tools.py,sha256=RbZvmjnueb8At35_kJOfFwDR36PN3ofh3iVV8ugJMwM,28307
+dhisana/utils/google_workspace_tools.py,sha256=6z2PngTumEwtS9L9zXEpdtpAoD2sFYfpc7md6z7_p_o,48869
 dhisana/utils/hubspot_clearbit.py,sha256=keNX1F_RnDl9AOPxYEOTMdukV_A9g8v9j1fZyT4tuP4,3440
 dhisana/utils/hubspot_crm_tools.py,sha256=lbXFCeq690_TDLjDG8Gm5E-2f1P5EuDqNf5j8PYpMm8,99298
 dhisana/utils/instantly_tools.py,sha256=hhqjDPyLE6o0dzzuvryszbK3ipnoGU2eBm6NlsUGJjY,4771
@@ -60,7 +60,7 @@ dhisana/utils/openai_helpers.py,sha256=ZK9S5-jcLCpiiD6XBLkCqYcNz-AGYmO9xh4e2H-FD
 dhisana/utils/openapi_spec_to_tools.py,sha256=oBLVq3WeDWvW9O02NCvY8bxQURQdKwHJHGcX8bC_b2I,1926
 dhisana/utils/parse_linkedin_messages_txt.py,sha256=g3N_ac70mAEuDDQ7Ott6mkOaBwI3ZvcsJD3R9RlYwPQ,3320
 dhisana/utils/profile.py,sha256=12IhefaLp3j74zzBzVRe50_KWqtWZ_cdzUKlYNy9T2Y,1192
-dhisana/utils/proxy_curl_tools.py,sha256=V54zXOP3K2EWGSYanvw43n45hP_4KG8kw2n_LBiL0ak,49971
+dhisana/utils/proxy_curl_tools.py,sha256=nHm3K_EHX8I-VGwONzCVPIA-OzbVMRXSA3VzmT_-uWk,52632
 dhisana/utils/proxycurl_search_leads.py,sha256=6PlraPNYQ4fIDzTYnY-T2g_ip5fPkqHigbGoPD8ZosQ,16131
 dhisana/utils/python_function_to_tools.py,sha256=jypddM6WTlIQmRWnqAugYJXvaPYaXaMgWAZRYeeGlj4,2682
 dhisana/utils/research_lead.py,sha256=L6w2fK5in8z2xmWnXBjbkvTdrwPf8ZfvAXq3gb7-S6s,7009
@@ -93,8 +93,8 @@ dhisana/workflow/agent.py,sha256=esv7_i_XuMkV2j1nz_UlsHov_m6X5WZZiZm_tG4OBHU,565
 dhisana/workflow/flow.py,sha256=xWE3qQbM7j2B3FH8XnY3zOL_QXX4LbTW4ArndnEYJE0,1638
 dhisana/workflow/task.py,sha256=HlWz9mtrwLYByoSnePOemBUBrMEcj7KbgNjEE1oF5wo,1830
 dhisana/workflow/test.py,sha256=E7lRnXK0PguTNzyasHytLzTJdkqIPxG5_4qk4hMEeKc,3399
-dhisana-0.0.1.dev235.dist-info/METADATA,sha256=FUmdIhxgFjRKEpg8NjqFPuJOVkVhuEi0GcEn47hztGU,1190
-dhisana-0.0.1.dev235.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dhisana-0.0.1.dev235.dist-info/entry_points.txt,sha256=jujxteZmNI9EkEaK-pOCoWuBujU8TCevdkfl9ZcKHek,49
-dhisana-0.0.1.dev235.dist-info/top_level.txt,sha256=NETTHt6YifG_P7XtRHbQiXZlgSFk9Qh9aR-ng1XTf4s,8
-dhisana-0.0.1.dev235.dist-info/RECORD,,
+dhisana-0.0.1.dev237.dist-info/METADATA,sha256=LUfZpAEgY5TWqyp9QXwP55ofUXT5PzjjJmAWGTkKHDU,1190
+dhisana-0.0.1.dev237.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dhisana-0.0.1.dev237.dist-info/entry_points.txt,sha256=jujxteZmNI9EkEaK-pOCoWuBujU8TCevdkfl9ZcKHek,49
+dhisana-0.0.1.dev237.dist-info/top_level.txt,sha256=NETTHt6YifG_P7XtRHbQiXZlgSFk9Qh9aR-ng1XTf4s,8
+dhisana-0.0.1.dev237.dist-info/RECORD,,