exa-py 1.14.10__py3-none-any.whl → 1.14.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of exa-py has been flagged as potentially problematic; review the diff below for details.

exa_py/api.py CHANGED
@@ -362,17 +362,17 @@ class _Result:
362
362
  subpages: Optional[List[_Result]] = None
363
363
  extras: Optional[Dict] = None
364
364
 
365
- def __init__(self, **kwargs):
366
- self.url = kwargs["url"]
367
- self.id = kwargs["id"]
368
- self.title = kwargs.get("title")
369
- self.score = kwargs.get("score")
370
- self.published_date = kwargs.get("published_date")
371
- self.author = kwargs.get("author")
372
- self.image = kwargs.get("image")
373
- self.favicon = kwargs.get("favicon")
374
- self.subpages = kwargs.get("subpages")
375
- self.extras = kwargs.get("extras")
365
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None):
366
+ self.url = url
367
+ self.id = id
368
+ self.title = title
369
+ self.score = score
370
+ self.published_date = published_date
371
+ self.author = author
372
+ self.image = image
373
+ self.favicon = favicon
374
+ self.subpages = subpages
375
+ self.extras = extras
376
376
 
377
377
  def __str__(self):
378
378
  return (
@@ -406,12 +406,12 @@ class Result(_Result):
406
406
  highlight_scores: Optional[List[float]] = None
407
407
  summary: Optional[str] = None
408
408
 
409
- def __init__(self, **kwargs):
410
- super().__init__(**kwargs)
411
- self.text = kwargs.get("text")
412
- self.highlights = kwargs.get("highlights")
413
- self.highlight_scores = kwargs.get("highlight_scores")
414
- self.summary = kwargs.get("summary")
409
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, text=None, highlights=None, highlight_scores=None, summary=None):
410
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
411
+ self.text = text
412
+ self.highlights = highlights
413
+ self.highlight_scores = highlight_scores
414
+ self.summary = summary
415
415
 
416
416
  def __str__(self):
417
417
  base_str = super().__str__()
@@ -434,9 +434,9 @@ class ResultWithText(_Result):
434
434
 
435
435
  text: str = dataclasses.field(default_factory=str)
436
436
 
437
- def __init__(self, **kwargs):
438
- super().__init__(**kwargs)
439
- self.text = kwargs["text"]
437
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, text=""):
438
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
439
+ self.text = text
440
440
 
441
441
  def __str__(self):
442
442
  base_str = super().__str__()
@@ -456,10 +456,10 @@ class ResultWithHighlights(_Result):
456
456
  highlights: List[str] = dataclasses.field(default_factory=list)
457
457
  highlight_scores: List[float] = dataclasses.field(default_factory=list)
458
458
 
459
- def __init__(self, **kwargs):
460
- super().__init__(**kwargs)
461
- self.highlights = kwargs["highlights"]
462
- self.highlight_scores = kwargs["highlight_scores"]
459
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, highlights=None, highlight_scores=None):
460
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
461
+ self.highlights = highlights if highlights is not None else []
462
+ self.highlight_scores = highlight_scores if highlight_scores is not None else []
463
463
 
464
464
  def __str__(self):
465
465
  base_str = super().__str__()
@@ -484,11 +484,11 @@ class ResultWithTextAndHighlights(_Result):
484
484
  highlights: List[str] = dataclasses.field(default_factory=list)
485
485
  highlight_scores: List[float] = dataclasses.field(default_factory=list)
486
486
 
487
- def __init__(self, **kwargs):
488
- super().__init__(**kwargs)
489
- self.text = kwargs["text"]
490
- self.highlights = kwargs["highlights"]
491
- self.highlight_scores = kwargs["highlight_scores"]
487
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, text="", highlights=None, highlight_scores=None):
488
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
489
+ self.text = text
490
+ self.highlights = highlights if highlights is not None else []
491
+ self.highlight_scores = highlight_scores if highlight_scores is not None else []
492
492
 
493
493
  def __str__(self):
494
494
  base_str = super().__str__()
@@ -510,9 +510,9 @@ class ResultWithSummary(_Result):
510
510
 
511
511
  summary: str = dataclasses.field(default_factory=str)
512
512
 
513
- def __init__(self, **kwargs):
514
- super().__init__(**kwargs)
515
- self.summary = kwargs["summary"]
513
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, summary=""):
514
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
515
+ self.summary = summary
516
516
 
517
517
  def __str__(self):
518
518
  base_str = super().__str__()
@@ -532,10 +532,10 @@ class ResultWithTextAndSummary(_Result):
532
532
  text: str = dataclasses.field(default_factory=str)
533
533
  summary: str = dataclasses.field(default_factory=str)
534
534
 
535
- def __init__(self, **kwargs):
536
- super().__init__(**kwargs)
537
- self.text = kwargs["text"]
538
- self.summary = kwargs["summary"]
535
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, text="", summary=""):
536
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
537
+ self.text = text
538
+ self.summary = summary
539
539
 
540
540
  def __str__(self):
541
541
  base_str = super().__str__()
@@ -557,11 +557,11 @@ class ResultWithHighlightsAndSummary(_Result):
557
557
  highlight_scores: List[float] = dataclasses.field(default_factory=list)
558
558
  summary: str = dataclasses.field(default_factory=str)
559
559
 
560
- def __init__(self, **kwargs):
561
- super().__init__(**kwargs)
562
- self.highlights = kwargs["highlights"]
563
- self.highlight_scores = kwargs["highlight_scores"]
564
- self.summary = kwargs["summary"]
560
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, highlights=None, highlight_scores=None, summary=""):
561
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
562
+ self.highlights = highlights if highlights is not None else []
563
+ self.highlight_scores = highlight_scores if highlight_scores is not None else []
564
+ self.summary = summary
565
565
 
566
566
  def __str__(self):
567
567
  base_str = super().__str__()
@@ -589,12 +589,12 @@ class ResultWithTextAndHighlightsAndSummary(_Result):
589
589
  highlight_scores: List[float] = dataclasses.field(default_factory=list)
590
590
  summary: str = dataclasses.field(default_factory=str)
591
591
 
592
- def __init__(self, **kwargs):
593
- super().__init__(**kwargs)
594
- self.text = kwargs["text"]
595
- self.highlights = kwargs["highlights"]
596
- self.highlight_scores = kwargs["highlight_scores"]
597
- self.summary = kwargs["summary"]
592
+ def __init__(self, url, id, title=None, score=None, published_date=None, author=None, image=None, favicon=None, subpages=None, extras=None, text="", highlights=None, highlight_scores=None, summary=""):
593
+ super().__init__(url, id, title, score, published_date, author, image, favicon, subpages, extras)
594
+ self.text = text
595
+ self.highlights = highlights if highlights is not None else []
596
+ self.highlight_scores = highlight_scores if highlight_scores is not None else []
597
+ self.summary = summary
598
598
 
599
599
  def __str__(self):
600
600
  base_str = super().__str__()
@@ -626,13 +626,13 @@ class AnswerResult:
626
626
  author: Optional[str] = None
627
627
  text: Optional[str] = None
628
628
 
629
- def __init__(self, **kwargs):
630
- self.id = kwargs["id"]
631
- self.url = kwargs["url"]
632
- self.title = kwargs.get("title")
633
- self.published_date = kwargs.get("published_date")
634
- self.author = kwargs.get("author")
635
- self.text = kwargs.get("text")
629
+ def __init__(self, id, url, title=None, published_date=None, author=None, text=None):
630
+ self.id = id
631
+ self.url = url
632
+ self.title = title
633
+ self.published_date = published_date
634
+ self.author = author
635
+ self.text = text
636
636
 
637
637
  def __str__(self):
638
638
  return (
@@ -733,9 +733,17 @@ class StreamAnswerResponse:
733
733
  and chunk["citations"]
734
734
  and chunk["citations"] != "null"
735
735
  ):
736
- citations = [
737
- AnswerResult(**to_snake_case(s)) for s in chunk["citations"]
738
- ]
736
+ citations = []
737
+ for s in chunk["citations"]:
738
+ snake_s = to_snake_case(s)
739
+ citations.append(AnswerResult(
740
+ id=snake_s.get("id"),
741
+ url=snake_s.get("url"),
742
+ title=snake_s.get("title"),
743
+ published_date=snake_s.get("published_date"),
744
+ author=snake_s.get("author"),
745
+ text=snake_s.get("text")
746
+ ))
739
747
 
740
748
  stream_chunk = StreamChunk(content=content, citations=citations)
741
749
  if stream_chunk.has_data():
@@ -782,9 +790,17 @@ class AsyncStreamAnswerResponse:
782
790
  and chunk["citations"]
783
791
  and chunk["citations"] != "null"
784
792
  ):
785
- citations = [
786
- AnswerResult(**to_snake_case(s)) for s in chunk["citations"]
787
- ]
793
+ citations = []
794
+ for s in chunk["citations"]:
795
+ snake_s = to_snake_case(s)
796
+ citations.append(AnswerResult(
797
+ id=snake_s.get("id"),
798
+ url=snake_s.get("url"),
799
+ title=snake_s.get("title"),
800
+ published_date=snake_s.get("published_date"),
801
+ author=snake_s.get("author"),
802
+ text=snake_s.get("text")
803
+ ))
788
804
 
789
805
  stream_chunk = StreamChunk(content=content, citations=citations)
790
806
  if stream_chunk.has_data():
@@ -1008,8 +1024,27 @@ class Exa:
1008
1024
  options = to_camel_case(options)
1009
1025
  data = self.request("/search", options)
1010
1026
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
1027
+ results = []
1028
+ for result in data["results"]:
1029
+ snake_result = to_snake_case(result)
1030
+ results.append(Result(
1031
+ url=snake_result.get("url"),
1032
+ id=snake_result.get("id"),
1033
+ title=snake_result.get("title"),
1034
+ score=snake_result.get("score"),
1035
+ published_date=snake_result.get("published_date"),
1036
+ author=snake_result.get("author"),
1037
+ image=snake_result.get("image"),
1038
+ favicon=snake_result.get("favicon"),
1039
+ subpages=snake_result.get("subpages"),
1040
+ extras=snake_result.get("extras"),
1041
+ text=snake_result.get("text"),
1042
+ highlights=snake_result.get("highlights"),
1043
+ highlight_scores=snake_result.get("highlight_scores"),
1044
+ summary=snake_result.get("summary")
1045
+ ))
1011
1046
  return SearchResponse(
1012
- [Result(**to_snake_case(result)) for result in data["results"]],
1047
+ results,
1013
1048
  data["autopromptString"] if "autopromptString" in data else None,
1014
1049
  data["resolvedSearchType"] if "resolvedSearchType" in data else None,
1015
1050
  data["autoDate"] if "autoDate" in data else None,
@@ -1245,7 +1280,10 @@ class Exa:
1245
1280
  ) -> SearchResponse[ResultWithTextAndHighlightsAndSummary]: ...
1246
1281
 
1247
1282
  def search_and_contents(self, query: str, **kwargs):
1248
- options = {k: v for k, v in {"query": query, **kwargs}.items() if v is not None}
1283
+ options = {"query": query}
1284
+ for k, v in kwargs.items():
1285
+ if v is not None:
1286
+ options[k] = v
1249
1287
  # If user didn't ask for any particular content, default to text
1250
1288
  if (
1251
1289
  "text" not in options
@@ -1255,14 +1293,11 @@ class Exa:
1255
1293
  ):
1256
1294
  options["text"] = True
1257
1295
 
1258
- validate_search_options(
1259
- options,
1260
- {
1261
- **SEARCH_OPTIONS_TYPES,
1262
- **CONTENTS_OPTIONS_TYPES,
1263
- **CONTENTS_ENDPOINT_OPTIONS_TYPES,
1264
- },
1265
- )
1296
+ merged_options = {}
1297
+ merged_options.update(SEARCH_OPTIONS_TYPES)
1298
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
1299
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
1300
+ validate_search_options(options, merged_options)
1266
1301
 
1267
1302
  # Nest the appropriate fields under "contents"
1268
1303
  options = nest_fields(
@@ -1283,8 +1318,27 @@ class Exa:
1283
1318
  options = to_camel_case(options)
1284
1319
  data = self.request("/search", options)
1285
1320
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
1321
+ results = []
1322
+ for result in data["results"]:
1323
+ snake_result = to_snake_case(result)
1324
+ results.append(Result(
1325
+ url=snake_result.get("url"),
1326
+ id=snake_result.get("id"),
1327
+ title=snake_result.get("title"),
1328
+ score=snake_result.get("score"),
1329
+ published_date=snake_result.get("published_date"),
1330
+ author=snake_result.get("author"),
1331
+ image=snake_result.get("image"),
1332
+ favicon=snake_result.get("favicon"),
1333
+ subpages=snake_result.get("subpages"),
1334
+ extras=snake_result.get("extras"),
1335
+ text=snake_result.get("text"),
1336
+ highlights=snake_result.get("highlights"),
1337
+ highlight_scores=snake_result.get("highlight_scores"),
1338
+ summary=snake_result.get("summary")
1339
+ ))
1286
1340
  return SearchResponse(
1287
- [Result(**to_snake_case(result)) for result in data["results"]],
1341
+ results,
1288
1342
  data["autopromptString"] if "autopromptString" in data else None,
1289
1343
  data["resolvedSearchType"] if "resolvedSearchType" in data else None,
1290
1344
  data["autoDate"] if "autoDate" in data else None,
@@ -1416,11 +1470,11 @@ class Exa:
1416
1470
  ) -> SearchResponse[ResultWithTextAndHighlightsAndSummary]: ...
1417
1471
 
1418
1472
  def get_contents(self, urls: Union[str, List[str], List[_Result]], **kwargs):
1419
- options = {
1420
- k: v
1421
- for k, v in {"urls": urls, **kwargs}.items()
1422
- if k != "self" and v is not None
1423
- }
1473
+ options = {"urls": urls}
1474
+ for k, v in kwargs.items():
1475
+ if k != "self" and v is not None:
1476
+ options[k] = v
1477
+
1424
1478
  if (
1425
1479
  "text" not in options
1426
1480
  and "highlights" not in options
@@ -1429,16 +1483,41 @@ class Exa:
1429
1483
  ):
1430
1484
  options["text"] = True
1431
1485
 
1432
- validate_search_options(
1433
- options,
1434
- {**CONTENTS_OPTIONS_TYPES, **CONTENTS_ENDPOINT_OPTIONS_TYPES},
1435
- )
1486
+ merged_options = {}
1487
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
1488
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
1489
+ validate_search_options(options, merged_options)
1436
1490
  options = to_camel_case(options)
1437
1491
  data = self.request("/contents", options)
1438
1492
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
1439
- statuses = [ContentStatus(**status) for status in data.get("statuses", [])]
1493
+ statuses = []
1494
+ for status in data.get("statuses", []):
1495
+ statuses.append(ContentStatus(
1496
+ id=status.get("id"),
1497
+ status=status.get("status"),
1498
+ source=status.get("source")
1499
+ ))
1500
+ results = []
1501
+ for result in data["results"]:
1502
+ snake_result = to_snake_case(result)
1503
+ results.append(Result(
1504
+ url=snake_result.get("url"),
1505
+ id=snake_result.get("id"),
1506
+ title=snake_result.get("title"),
1507
+ score=snake_result.get("score"),
1508
+ published_date=snake_result.get("published_date"),
1509
+ author=snake_result.get("author"),
1510
+ image=snake_result.get("image"),
1511
+ favicon=snake_result.get("favicon"),
1512
+ subpages=snake_result.get("subpages"),
1513
+ extras=snake_result.get("extras"),
1514
+ text=snake_result.get("text"),
1515
+ highlights=snake_result.get("highlights"),
1516
+ highlight_scores=snake_result.get("highlight_scores"),
1517
+ summary=snake_result.get("summary")
1518
+ ))
1440
1519
  return SearchResponse(
1441
- [Result(**to_snake_case(result)) for result in data["results"]],
1520
+ results,
1442
1521
  data.get("autopromptString"),
1443
1522
  data.get("resolvedSearchType"),
1444
1523
  data.get("autoDate"),
@@ -1489,8 +1568,27 @@ class Exa:
1489
1568
  options = to_camel_case(options)
1490
1569
  data = self.request("/findSimilar", options)
1491
1570
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
1571
+ results = []
1572
+ for result in data["results"]:
1573
+ snake_result = to_snake_case(result)
1574
+ results.append(Result(
1575
+ url=snake_result.get("url"),
1576
+ id=snake_result.get("id"),
1577
+ title=snake_result.get("title"),
1578
+ score=snake_result.get("score"),
1579
+ published_date=snake_result.get("published_date"),
1580
+ author=snake_result.get("author"),
1581
+ image=snake_result.get("image"),
1582
+ favicon=snake_result.get("favicon"),
1583
+ subpages=snake_result.get("subpages"),
1584
+ extras=snake_result.get("extras"),
1585
+ text=snake_result.get("text"),
1586
+ highlights=snake_result.get("highlights"),
1587
+ highlight_scores=snake_result.get("highlight_scores"),
1588
+ summary=snake_result.get("summary")
1589
+ ))
1492
1590
  return SearchResponse(
1493
- [Result(**to_snake_case(result)) for result in data["results"]],
1591
+ results,
1494
1592
  data.get("autopromptString"),
1495
1593
  data.get("resolvedSearchType"),
1496
1594
  data.get("autoDate"),
@@ -1710,7 +1808,10 @@ class Exa:
1710
1808
  ) -> SearchResponse[ResultWithTextAndHighlightsAndSummary]: ...
1711
1809
 
1712
1810
  def find_similar_and_contents(self, url: str, **kwargs):
1713
- options = {k: v for k, v in {"url": url, **kwargs}.items() if v is not None}
1811
+ options = {"url": url}
1812
+ for k, v in kwargs.items():
1813
+ if v is not None:
1814
+ options[k] = v
1714
1815
  # Default to text if none specified
1715
1816
  if (
1716
1817
  "text" not in options
@@ -1719,14 +1820,11 @@ class Exa:
1719
1820
  ):
1720
1821
  options["text"] = True
1721
1822
 
1722
- validate_search_options(
1723
- options,
1724
- {
1725
- **FIND_SIMILAR_OPTIONS_TYPES,
1726
- **CONTENTS_OPTIONS_TYPES,
1727
- **CONTENTS_ENDPOINT_OPTIONS_TYPES,
1728
- },
1729
- )
1823
+ merged_options = {}
1824
+ merged_options.update(FIND_SIMILAR_OPTIONS_TYPES)
1825
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
1826
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
1827
+ validate_search_options(options, merged_options)
1730
1828
  # We nest the content fields
1731
1829
  options = nest_fields(
1732
1830
  options,
@@ -1746,8 +1844,27 @@ class Exa:
1746
1844
  options = to_camel_case(options)
1747
1845
  data = self.request("/findSimilar", options)
1748
1846
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
1847
+ results = []
1848
+ for result in data["results"]:
1849
+ snake_result = to_snake_case(result)
1850
+ results.append(Result(
1851
+ url=snake_result.get("url"),
1852
+ id=snake_result.get("id"),
1853
+ title=snake_result.get("title"),
1854
+ score=snake_result.get("score"),
1855
+ published_date=snake_result.get("published_date"),
1856
+ author=snake_result.get("author"),
1857
+ image=snake_result.get("image"),
1858
+ favicon=snake_result.get("favicon"),
1859
+ subpages=snake_result.get("subpages"),
1860
+ extras=snake_result.get("extras"),
1861
+ text=snake_result.get("text"),
1862
+ highlights=snake_result.get("highlights"),
1863
+ highlight_scores=snake_result.get("highlight_scores"),
1864
+ summary=snake_result.get("summary")
1865
+ ))
1749
1866
  return SearchResponse(
1750
- [Result(**to_snake_case(result)) for result in data["results"]],
1867
+ results,
1751
1868
  data.get("autopromptString"),
1752
1869
  data.get("resolvedSearchType"),
1753
1870
  data.get("autoDate"),
@@ -1813,10 +1930,8 @@ class Exa:
1813
1930
  "flags": flags,
1814
1931
  }
1815
1932
 
1816
- create_kwargs = {
1817
- "model": model,
1818
- **openai_kwargs,
1819
- }
1933
+ create_kwargs = {"model": model}
1934
+ create_kwargs.update(openai_kwargs)
1820
1935
 
1821
1936
  return self._create_with_tool(
1822
1937
  create_fn=func,
@@ -1871,7 +1986,23 @@ class Exa:
1871
1986
  )
1872
1987
 
1873
1988
  # We do a search_and_contents automatically
1874
- exa_result = self.search_and_contents(query, **exa_kwargs)
1989
+ exa_result = self.search_and_contents(
1990
+ query=query,
1991
+ num_results=exa_kwargs.get("num_results"),
1992
+ include_domains=exa_kwargs.get("include_domains"),
1993
+ exclude_domains=exa_kwargs.get("exclude_domains"),
1994
+ highlights=exa_kwargs.get("highlights"),
1995
+ start_crawl_date=exa_kwargs.get("start_crawl_date"),
1996
+ end_crawl_date=exa_kwargs.get("end_crawl_date"),
1997
+ start_published_date=exa_kwargs.get("start_published_date"),
1998
+ end_published_date=exa_kwargs.get("end_published_date"),
1999
+ include_text=exa_kwargs.get("include_text"),
2000
+ exclude_text=exa_kwargs.get("exclude_text"),
2001
+ use_autoprompt=exa_kwargs.get("use_autoprompt"),
2002
+ type=exa_kwargs.get("type"),
2003
+ category=exa_kwargs.get("category"),
2004
+ flags=exa_kwargs.get("flags")
2005
+ )
1875
2006
  exa_str = format_exa_result(exa_result, max_len=max_len)
1876
2007
  new_messages = add_message_to_messages(completion, messages, exa_str)
1877
2008
  completion = create_fn(messages=new_messages, **create_kwargs)
@@ -1928,10 +2059,18 @@ class Exa:
1928
2059
  options = to_camel_case(options)
1929
2060
  response = self.request("/answer", options)
1930
2061
 
1931
- return AnswerResponse(
1932
- response["answer"],
1933
- [AnswerResult(**to_snake_case(result)) for result in response["citations"]],
1934
- )
2062
+ citations = []
2063
+ for result in response["citations"]:
2064
+ snake_result = to_snake_case(result)
2065
+ citations.append(AnswerResult(
2066
+ id=snake_result.get("id"),
2067
+ url=snake_result.get("url"),
2068
+ title=snake_result.get("title"),
2069
+ published_date=snake_result.get("published_date"),
2070
+ author=snake_result.get("author"),
2071
+ text=snake_result.get("text")
2072
+ ))
2073
+ return AnswerResponse(response["answer"], citations)
1935
2074
 
1936
2075
  def stream_answer(
1937
2076
  self,
@@ -2053,8 +2192,27 @@ class AsyncExa(Exa):
2053
2192
  options = to_camel_case(options)
2054
2193
  data = await self.async_request("/search", options)
2055
2194
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
2195
+ results = []
2196
+ for result in data["results"]:
2197
+ snake_result = to_snake_case(result)
2198
+ results.append(Result(
2199
+ url=snake_result.get("url"),
2200
+ id=snake_result.get("id"),
2201
+ title=snake_result.get("title"),
2202
+ score=snake_result.get("score"),
2203
+ published_date=snake_result.get("published_date"),
2204
+ author=snake_result.get("author"),
2205
+ image=snake_result.get("image"),
2206
+ favicon=snake_result.get("favicon"),
2207
+ subpages=snake_result.get("subpages"),
2208
+ extras=snake_result.get("extras"),
2209
+ text=snake_result.get("text"),
2210
+ highlights=snake_result.get("highlights"),
2211
+ highlight_scores=snake_result.get("highlight_scores"),
2212
+ summary=snake_result.get("summary")
2213
+ ))
2056
2214
  return SearchResponse(
2057
- [Result(**to_snake_case(result)) for result in data["results"]],
2215
+ results,
2058
2216
  data["autopromptString"] if "autopromptString" in data else None,
2059
2217
  data["resolvedSearchType"] if "resolvedSearchType" in data else None,
2060
2218
  data["autoDate"] if "autoDate" in data else None,
@@ -2062,7 +2220,10 @@ class AsyncExa(Exa):
2062
2220
  )
2063
2221
 
2064
2222
  async def search_and_contents(self, query: str, **kwargs):
2065
- options = {k: v for k, v in {"query": query, **kwargs}.items() if v is not None}
2223
+ options = {"query": query}
2224
+ for k, v in kwargs.items():
2225
+ if v is not None:
2226
+ options[k] = v
2066
2227
  # If user didn't ask for any particular content, default to text
2067
2228
  if (
2068
2229
  "text" not in options
@@ -2072,14 +2233,11 @@ class AsyncExa(Exa):
2072
2233
  ):
2073
2234
  options["text"] = True
2074
2235
 
2075
- validate_search_options(
2076
- options,
2077
- {
2078
- **SEARCH_OPTIONS_TYPES,
2079
- **CONTENTS_OPTIONS_TYPES,
2080
- **CONTENTS_ENDPOINT_OPTIONS_TYPES,
2081
- },
2082
- )
2236
+ merged_options = {}
2237
+ merged_options.update(SEARCH_OPTIONS_TYPES)
2238
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
2239
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
2240
+ validate_search_options(options, merged_options)
2083
2241
 
2084
2242
  # Nest the appropriate fields under "contents"
2085
2243
  options = nest_fields(
@@ -2100,8 +2258,27 @@ class AsyncExa(Exa):
2100
2258
  options = to_camel_case(options)
2101
2259
  data = await self.async_request("/search", options)
2102
2260
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
2261
+ results = []
2262
+ for result in data["results"]:
2263
+ snake_result = to_snake_case(result)
2264
+ results.append(Result(
2265
+ url=snake_result.get("url"),
2266
+ id=snake_result.get("id"),
2267
+ title=snake_result.get("title"),
2268
+ score=snake_result.get("score"),
2269
+ published_date=snake_result.get("published_date"),
2270
+ author=snake_result.get("author"),
2271
+ image=snake_result.get("image"),
2272
+ favicon=snake_result.get("favicon"),
2273
+ subpages=snake_result.get("subpages"),
2274
+ extras=snake_result.get("extras"),
2275
+ text=snake_result.get("text"),
2276
+ highlights=snake_result.get("highlights"),
2277
+ highlight_scores=snake_result.get("highlight_scores"),
2278
+ summary=snake_result.get("summary")
2279
+ ))
2103
2280
  return SearchResponse(
2104
- [Result(**to_snake_case(result)) for result in data["results"]],
2281
+ results,
2105
2282
  data["autopromptString"] if "autopromptString" in data else None,
2106
2283
  data["resolvedSearchType"] if "resolvedSearchType" in data else None,
2107
2284
  data["autoDate"] if "autoDate" in data else None,
@@ -2110,11 +2287,11 @@ class AsyncExa(Exa):
2110
2287
  )
2111
2288
 
2112
2289
  async def get_contents(self, urls: Union[str, List[str], List[_Result]], **kwargs):
2113
- options = {
2114
- k: v
2115
- for k, v in {"urls": urls, **kwargs}.items()
2116
- if k != "self" and v is not None
2117
- }
2290
+ options = {"urls": urls}
2291
+ for k, v in kwargs.items():
2292
+ if k != "self" and v is not None:
2293
+ options[k] = v
2294
+
2118
2295
  if (
2119
2296
  "text" not in options
2120
2297
  and "highlights" not in options
@@ -2123,16 +2300,41 @@ class AsyncExa(Exa):
2123
2300
  ):
2124
2301
  options["text"] = True
2125
2302
 
2126
- validate_search_options(
2127
- options,
2128
- {**CONTENTS_OPTIONS_TYPES, **CONTENTS_ENDPOINT_OPTIONS_TYPES},
2129
- )
2303
+ merged_options = {}
2304
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
2305
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
2306
+ validate_search_options(options, merged_options)
2130
2307
  options = to_camel_case(options)
2131
2308
  data = await self.async_request("/contents", options)
2132
2309
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
2133
- statuses = [ContentStatus(**status) for status in data.get("statuses", [])]
2310
+ statuses = []
2311
+ for status in data.get("statuses", []):
2312
+ statuses.append(ContentStatus(
2313
+ id=status.get("id"),
2314
+ status=status.get("status"),
2315
+ source=status.get("source")
2316
+ ))
2317
+ results = []
2318
+ for result in data["results"]:
2319
+ snake_result = to_snake_case(result)
2320
+ results.append(Result(
2321
+ url=snake_result.get("url"),
2322
+ id=snake_result.get("id"),
2323
+ title=snake_result.get("title"),
2324
+ score=snake_result.get("score"),
2325
+ published_date=snake_result.get("published_date"),
2326
+ author=snake_result.get("author"),
2327
+ image=snake_result.get("image"),
2328
+ favicon=snake_result.get("favicon"),
2329
+ subpages=snake_result.get("subpages"),
2330
+ extras=snake_result.get("extras"),
2331
+ text=snake_result.get("text"),
2332
+ highlights=snake_result.get("highlights"),
2333
+ highlight_scores=snake_result.get("highlight_scores"),
2334
+ summary=snake_result.get("summary")
2335
+ ))
2134
2336
  return SearchResponse(
2135
- [Result(**to_snake_case(result)) for result in data["results"]],
2337
+ results,
2136
2338
  data.get("autopromptString"),
2137
2339
  data.get("resolvedSearchType"),
2138
2340
  data.get("autoDate"),
@@ -2183,8 +2385,27 @@ class AsyncExa(Exa):
2183
2385
  options = to_camel_case(options)
2184
2386
  data = await self.async_request("/findSimilar", options)
2185
2387
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
2388
+ results = []
2389
+ for result in data["results"]:
2390
+ snake_result = to_snake_case(result)
2391
+ results.append(Result(
2392
+ url=snake_result.get("url"),
2393
+ id=snake_result.get("id"),
2394
+ title=snake_result.get("title"),
2395
+ score=snake_result.get("score"),
2396
+ published_date=snake_result.get("published_date"),
2397
+ author=snake_result.get("author"),
2398
+ image=snake_result.get("image"),
2399
+ favicon=snake_result.get("favicon"),
2400
+ subpages=snake_result.get("subpages"),
2401
+ extras=snake_result.get("extras"),
2402
+ text=snake_result.get("text"),
2403
+ highlights=snake_result.get("highlights"),
2404
+ highlight_scores=snake_result.get("highlight_scores"),
2405
+ summary=snake_result.get("summary")
2406
+ ))
2186
2407
  return SearchResponse(
2187
- [Result(**to_snake_case(result)) for result in data["results"]],
2408
+ results,
2188
2409
  data.get("autopromptString"),
2189
2410
  data.get("resolvedSearchType"),
2190
2411
  data.get("autoDate"),
@@ -2192,7 +2413,10 @@ class AsyncExa(Exa):
2192
2413
  )
2193
2414
 
2194
2415
  async def find_similar_and_contents(self, url: str, **kwargs):
2195
- options = {k: v for k, v in {"url": url, **kwargs}.items() if v is not None}
2416
+ options = {"url": url}
2417
+ for k, v in kwargs.items():
2418
+ if v is not None:
2419
+ options[k] = v
2196
2420
  # Default to text if none specified
2197
2421
  if (
2198
2422
  "text" not in options
@@ -2201,14 +2425,11 @@ class AsyncExa(Exa):
2201
2425
  ):
2202
2426
  options["text"] = True
2203
2427
 
2204
- validate_search_options(
2205
- options,
2206
- {
2207
- **FIND_SIMILAR_OPTIONS_TYPES,
2208
- **CONTENTS_OPTIONS_TYPES,
2209
- **CONTENTS_ENDPOINT_OPTIONS_TYPES,
2210
- },
2211
- )
2428
+ merged_options = {}
2429
+ merged_options.update(FIND_SIMILAR_OPTIONS_TYPES)
2430
+ merged_options.update(CONTENTS_OPTIONS_TYPES)
2431
+ merged_options.update(CONTENTS_ENDPOINT_OPTIONS_TYPES)
2432
+ validate_search_options(options, merged_options)
2212
2433
  # We nest the content fields
2213
2434
  options = nest_fields(
2214
2435
  options,
@@ -2228,8 +2449,27 @@ class AsyncExa(Exa):
2228
2449
  options = to_camel_case(options)
2229
2450
  data = await self.async_request("/findSimilar", options)
2230
2451
  cost_dollars = parse_cost_dollars(data.get("costDollars"))
2452
+ results = []
2453
+ for result in data["results"]:
2454
+ snake_result = to_snake_case(result)
2455
+ results.append(Result(
2456
+ url=snake_result.get("url"),
2457
+ id=snake_result.get("id"),
2458
+ title=snake_result.get("title"),
2459
+ score=snake_result.get("score"),
2460
+ published_date=snake_result.get("published_date"),
2461
+ author=snake_result.get("author"),
2462
+ image=snake_result.get("image"),
2463
+ favicon=snake_result.get("favicon"),
2464
+ subpages=snake_result.get("subpages"),
2465
+ extras=snake_result.get("extras"),
2466
+ text=snake_result.get("text"),
2467
+ highlights=snake_result.get("highlights"),
2468
+ highlight_scores=snake_result.get("highlight_scores"),
2469
+ summary=snake_result.get("summary")
2470
+ ))
2231
2471
  return SearchResponse(
2232
- [Result(**to_snake_case(result)) for result in data["results"]],
2472
+ results,
2233
2473
  data.get("autopromptString"),
2234
2474
  data.get("resolvedSearchType"),
2235
2475
  data.get("autoDate"),
@@ -2272,10 +2512,18 @@ class AsyncExa(Exa):
2272
2512
  options = to_camel_case(options)
2273
2513
  response = await self.async_request("/answer", options)
2274
2514
 
2275
- return AnswerResponse(
2276
- response["answer"],
2277
- [AnswerResult(**to_snake_case(result)) for result in response["citations"]],
2278
- )
2515
+ citations = []
2516
+ for result in response["citations"]:
2517
+ snake_result = to_snake_case(result)
2518
+ citations.append(AnswerResult(
2519
+ id=snake_result.get("id"),
2520
+ url=snake_result.get("url"),
2521
+ title=snake_result.get("title"),
2522
+ published_date=snake_result.get("published_date"),
2523
+ author=snake_result.get("author"),
2524
+ text=snake_result.get("text")
2525
+ ))
2526
+ return AnswerResponse(response["answer"], citations)
2279
2527
 
2280
2528
  async def stream_answer(
2281
2529
  self,
exa_py/research/client.py CHANGED
@@ -322,10 +322,24 @@ def _build_research_task(raw: Dict[str, Any]):
322
322
  from ..api import _Result, to_snake_case # noqa: WPS433 – runtime import
323
323
 
324
324
  citations_raw = raw.get("citations", {}) or {}
325
- citations_parsed = {
326
- key: [_Result(**to_snake_case(c)) for c in cites]
327
- for key, cites in citations_raw.items()
328
- }
325
+ citations_parsed = {}
326
+ for key, cites in citations_raw.items():
327
+ results = []
328
+ for c in cites:
329
+ snake_c = to_snake_case(c)
330
+ results.append(_Result(
331
+ url=snake_c.get("url"),
332
+ id=snake_c.get("id"),
333
+ title=snake_c.get("title"),
334
+ score=snake_c.get("score"),
335
+ published_date=snake_c.get("published_date"),
336
+ author=snake_c.get("author"),
337
+ image=snake_c.get("image"),
338
+ favicon=snake_c.get("favicon"),
339
+ subpages=snake_c.get("subpages"),
340
+ extras=snake_c.get("extras")
341
+ ))
342
+ citations_parsed[key] = results
329
343
 
330
344
  return ResearchTask(
331
345
  id=raw["id"],
exa_py/utils.py CHANGED
@@ -54,8 +54,8 @@ def format_exa_result(exa_result, max_len: int=-1):
54
54
 
55
55
  class ExaOpenAICompletion(ChatCompletion):
56
56
  """Exa wrapper for OpenAI completion."""
57
- def __init__(self, exa_result: Optional["SearchResponse[ResultWithText]"], **kwargs):
58
- super().__init__(**kwargs)
57
+ def __init__(self, exa_result: Optional["SearchResponse[ResultWithText]"], id, choices, created, model, object, system_fingerprint=None, usage=None):
58
+ super().__init__(id=id, choices=choices, created=created, model=model, object=object, system_fingerprint=system_fingerprint, usage=usage)
59
59
  self.exa_result = exa_result
60
60
 
61
61
 
@@ -1,7 +1,9 @@
1
1
  from .client import WebsetsClient
2
2
  from .imports import ImportsClient
3
+ from .events import EventsClient
3
4
 
4
5
  __all__ = [
5
6
  "WebsetsClient",
6
7
  "ImportsClient",
8
+ "EventsClient",
7
9
  ]
exa_py/websets/client.py CHANGED
@@ -18,6 +18,7 @@ from .enrichments import WebsetEnrichmentsClient
18
18
  from .webhooks import WebsetWebhooksClient
19
19
  from .monitors import MonitorsClient
20
20
  from .imports import ImportsClient
21
+ from .events import EventsClient
21
22
 
22
23
  class WebsetsClient(WebsetsBaseClient):
23
24
  """Client for managing Websets."""
@@ -30,6 +31,7 @@ class WebsetsClient(WebsetsBaseClient):
30
31
  self.webhooks = WebsetWebhooksClient(client)
31
32
  self.monitors = MonitorsClient(client)
32
33
  self.imports = ImportsClient(client)
34
+ self.events = EventsClient(client)
33
35
 
34
36
  def create(self, params: Union[Dict[str, Any], CreateWebsetParameters]) -> Webset:
35
37
  """Create a new Webset.
@@ -0,0 +1,3 @@
1
+ from .client import EventsClient
2
+
3
+ __all__ = ["EventsClient"]
@@ -0,0 +1,106 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import List, Optional, Union
4
+
5
+ from ..types import (
6
+ EventType,
7
+ ListEventsResponse,
8
+ WebsetCreatedEvent,
9
+ WebsetDeletedEvent,
10
+ WebsetIdleEvent,
11
+ WebsetPausedEvent,
12
+ WebsetItemCreatedEvent,
13
+ WebsetItemEnrichedEvent,
14
+ WebsetSearchCreatedEvent,
15
+ WebsetSearchUpdatedEvent,
16
+ WebsetSearchCanceledEvent,
17
+ WebsetSearchCompletedEvent,
18
+ )
19
+ from ..core.base import WebsetsBaseClient
20
+
21
+ # Type alias for all event types
22
+ Event = Union[
23
+ WebsetCreatedEvent,
24
+ WebsetDeletedEvent,
25
+ WebsetIdleEvent,
26
+ WebsetPausedEvent,
27
+ WebsetItemCreatedEvent,
28
+ WebsetItemEnrichedEvent,
29
+ WebsetSearchCreatedEvent,
30
+ WebsetSearchUpdatedEvent,
31
+ WebsetSearchCanceledEvent,
32
+ WebsetSearchCompletedEvent,
33
+ ]
34
+
35
+ class EventsClient(WebsetsBaseClient):
36
+ """Client for managing Events."""
37
+
38
+ def __init__(self, client):
39
+ super().__init__(client)
40
+
41
+ def list(self, *, cursor: Optional[str] = None, limit: Optional[int] = None,
42
+ types: Optional[List[EventType]] = None) -> ListEventsResponse:
43
+ """List all Events.
44
+
45
+ Args:
46
+ cursor (str, optional): The cursor to paginate through the results.
47
+ limit (int, optional): The number of results to return.
48
+ types (List[EventType], optional): The types of events to filter by.
49
+
50
+ Returns:
51
+ ListEventsResponse: List of events.
52
+ """
53
+ params = {}
54
+ if cursor is not None:
55
+ params["cursor"] = cursor
56
+ if limit is not None:
57
+ params["limit"] = limit
58
+ if types is not None:
59
+ # Convert EventType enums to their string values
60
+ params["types"] = [t.value if hasattr(t, 'value') else t for t in types]
61
+
62
+ response = self.request("/v0/events", params=params, method="GET")
63
+ return ListEventsResponse.model_validate(response)
64
+
65
+ def get(self, id: str) -> Event:
66
+ """Get an Event by ID.
67
+
68
+ Args:
69
+ id (str): The ID of the Event.
70
+
71
+ Returns:
72
+ Event: The retrieved event.
73
+ """
74
+ response = self.request(f"/v0/events/{id}", method="GET")
75
+
76
+ # The response should contain a 'type' field that helps us determine
77
+ # which specific event class to use for validation
78
+ event_type = response.get('type')
79
+
80
+ # Map event types to their corresponding classes
81
+ event_type_map = {
82
+ 'webset.created': WebsetCreatedEvent,
83
+ 'webset.deleted': WebsetDeletedEvent,
84
+ 'webset.idle': WebsetIdleEvent,
85
+ 'webset.paused': WebsetPausedEvent,
86
+ 'webset.item.created': WebsetItemCreatedEvent,
87
+ 'webset.item.enriched': WebsetItemEnrichedEvent,
88
+ 'webset.search.created': WebsetSearchCreatedEvent,
89
+ 'webset.search.updated': WebsetSearchUpdatedEvent,
90
+ 'webset.search.canceled': WebsetSearchCanceledEvent,
91
+ 'webset.search.completed': WebsetSearchCompletedEvent,
92
+ }
93
+
94
+ event_class = event_type_map.get(event_type)
95
+ if event_class:
96
+ return event_class.model_validate(response)
97
+ else:
98
+ # Fallback - try each type until one validates
99
+ # This shouldn't happen in normal operation
100
+ for event_class in event_type_map.values():
101
+ try:
102
+ return event_class.model_validate(response)
103
+ except Exception:
104
+ continue
105
+
106
+ raise ValueError(f"Unknown event type: {event_type}")
exa_py/websets/types.py CHANGED
@@ -6,7 +6,7 @@ from __future__ import annotations
6
6
 
7
7
  from datetime import datetime
8
8
  from enum import Enum
9
- from typing import Any, Dict, List, Literal, Optional, Union
9
+ from typing import Any, Dict, List, Literal, Optional, Union, Annotated
10
10
 
11
11
  from pydantic import AnyUrl, Field, PositiveInt, confloat, constr
12
12
  from .core.base import ExaBaseModel
@@ -287,20 +287,21 @@ class ImportSource(Enum):
287
287
 
288
288
 
289
289
  class ListEventsResponse(ExaBaseModel):
290
- data: List[
291
- Union[
292
- WebsetCreatedEvent,
293
- WebsetDeletedEvent,
294
- WebsetIdleEvent,
295
- WebsetPausedEvent,
296
- WebsetItemCreatedEvent,
297
- WebsetItemEnrichedEvent,
298
- WebsetSearchCreatedEvent,
299
- WebsetSearchUpdatedEvent,
300
- WebsetSearchCanceledEvent,
301
- WebsetSearchCompletedEvent,
302
- ]
303
- ] = Field(..., discriminator='type')
290
+ data: List[Annotated[
291
+ Union[
292
+ WebsetCreatedEvent,
293
+ WebsetDeletedEvent,
294
+ WebsetIdleEvent,
295
+ WebsetPausedEvent,
296
+ WebsetItemCreatedEvent,
297
+ WebsetItemEnrichedEvent,
298
+ WebsetSearchCreatedEvent,
299
+ WebsetSearchUpdatedEvent,
300
+ WebsetSearchCanceledEvent,
301
+ WebsetSearchCompletedEvent,
302
+ ],
303
+ Field(discriminator='type')
304
+ ]]
304
305
  """
305
306
  The list of events
306
307
  """
@@ -1440,6 +1441,17 @@ class WebsetItemPersonProperties(ExaBaseModel):
1440
1441
  """
1441
1442
 
1442
1443
 
1444
+ class WebsetItemPersonCompanyPropertiesFields(ExaBaseModel):
1445
+ name: str
1446
+ """
1447
+ The name of the company
1448
+ """
1449
+ location: Optional[str] = None
1450
+ """
1451
+ The location the person is working at the company
1452
+ """
1453
+
1454
+
1443
1455
  class WebsetItemPersonPropertiesFields(ExaBaseModel):
1444
1456
  name: str
1445
1457
  """
@@ -1453,6 +1465,10 @@ class WebsetItemPersonPropertiesFields(ExaBaseModel):
1453
1465
  """
1454
1466
  The current work position of the person
1455
1467
  """
1468
+ company: Optional[WebsetItemPersonCompanyPropertiesFields] = None
1469
+ """
1470
+ The company the person is working at
1471
+ """
1456
1472
  picture_url: Optional[AnyUrl] = Field(None, alias='pictureUrl')
1457
1473
  """
1458
1474
  The URL of the person's picture
@@ -1,24 +1,20 @@
1
- Metadata-Version: 2.3
1
+ Metadata-Version: 2.4
2
2
  Name: exa-py
3
- Version: 1.14.10
3
+ Version: 1.14.12
4
4
  Summary: Python SDK for Exa API.
5
+ Home-page: https://github.com/exa-labs/exa-py
6
+ Author: Exa
7
+ Author-email: Exa AI <hello@exa.ai>
5
8
  License: MIT
6
- Author: Exa AI
7
- Author-email: hello@exa.ai
8
9
  Requires-Python: >=3.9
9
- Classifier: License :: OSI Approved :: MIT License
10
- Classifier: Programming Language :: Python :: 3
11
- Classifier: Programming Language :: Python :: 3.9
12
- Classifier: Programming Language :: Python :: 3.10
13
- Classifier: Programming Language :: Python :: 3.11
14
- Classifier: Programming Language :: Python :: 3.12
15
- Classifier: Programming Language :: Python :: 3.13
16
- Requires-Dist: httpx (>=0.28.1)
17
- Requires-Dist: openai (>=1.48)
18
- Requires-Dist: pydantic (>=2.10.6)
19
- Requires-Dist: requests (>=2.32.3)
20
- Requires-Dist: typing-extensions (>=4.12.2)
21
10
  Description-Content-Type: text/markdown
11
+ Requires-Dist: requests>=2.32.3
12
+ Requires-Dist: typing-extensions>=4.12.2
13
+ Requires-Dist: openai>=1.48
14
+ Requires-Dist: pydantic>=2.10.6
15
+ Requires-Dist: httpx>=0.28.1
16
+ Dynamic: author
17
+ Dynamic: home-page
22
18
 
23
19
  # Exa
24
20
 
@@ -130,4 +126,3 @@ exa = Exa(api_key="your-api-key")
130
126
  output_schema=OUTPUT_SCHEMA,
131
127
  )
132
128
  ```
133
-
@@ -1,17 +1,19 @@
1
1
  exa_py/__init__.py,sha256=M2GC9oSdoV6m2msboW0vMWWl8wrth4o6gmEV4MYLGG8,66
2
- exa_py/api.py,sha256=qcEmAuPaP2B6890OKIvAJRG0qofTu4i4TGXbQlqzDoU,86367
2
+ exa_py/api.py,sha256=IrToa0zN39SREpwwcyuFvnoE_-pIc9uWVDkEw07IN9I,99737
3
3
  exa_py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ exa_py/utils.py,sha256=sq1KOajuMWI1lygYeb9MqjXn8yqVyK0BtFZZmTwSaMY,2580
4
5
  exa_py/research/__init__.py,sha256=QeY-j6bP4QP5tF9ytX0IeQhJvd0Wn4cJCD69U8pP7kA,271
5
- exa_py/research/client.py,sha256=C2ukFq_dE1xUfhMlHwpD9cY5rDClgn8N92pH4_FEVpE,11901
6
+ exa_py/research/client.py,sha256=x9b299ROfbhdMrdto6jZCHGRJE5U5V2fWHsSlnnNkWo,12471
6
7
  exa_py/research/models.py,sha256=j7YgRoMRp2MLgnaij7775x_hJEeV5gksKpfLwmawqxY,3704
7
- exa_py/utils.py,sha256=Rc1FJjoR9LQ7L_OJM91Sd1GNkbHjcLyEvJENhRix6gc,2405
8
- exa_py/websets/__init__.py,sha256=6HQWR8_ABPeZx2sHfwGV4QQf1Eh8MRaO-kzoIO_1ua8,126
9
- exa_py/websets/_generator/pydantic/BaseModel.jinja2,sha256=RUDCmPZVamoVx1WudylscYFfDhGoNNtRYlpTvKjAiuA,1276
10
- exa_py/websets/client.py,sha256=nP0XZAEMIFvinb1DJ6gNF-8jCnKMUmBun5wZ9EgeZ0M,4891
8
+ exa_py/websets/__init__.py,sha256=x7Dc0MS8raRXA7Ud6alKgnsUmLi6X9GTqfB8kOwC9iQ,179
9
+ exa_py/websets/client.py,sha256=v8Y0p5PosjLkb7EYQ83g3nmoIIHmKaXF7JQVT8K5h2E,4967
10
+ exa_py/websets/types.py,sha256=ykJHPXNiqr2fbRYYprshgBcG9X5IRRwi2mu56g7XCZA,42085
11
11
  exa_py/websets/core/__init__.py,sha256=xOyrFaqtBocMUu321Jpbk7IzIQRNZufSIGJXrKoG-Bg,323
12
12
  exa_py/websets/core/base.py,sha256=thVIeRtlabbvueP0dAni5Nwtl9AWYv1I1Mmyc_jlYO0,4086
13
13
  exa_py/websets/enrichments/__init__.py,sha256=5dJIEKKceUost3RnI6PpCSB3VjUCBzxseEsIXu-ZY-Y,83
14
14
  exa_py/websets/enrichments/client.py,sha256=obUjn4vH6tKBMtHEBVdMzlN8in0Fx3sCP-bXx-Le1zM,2338
15
+ exa_py/websets/events/__init__.py,sha256=aFJ9O5UudtQQzndVmdB96IaM2l07qyM1B_8xKY7rp58,60
16
+ exa_py/websets/events/client.py,sha256=Hzatqp3X-K0ZGe36cjFMgbhnsErcDLdGWQVirhmHjvY,3622
15
17
  exa_py/websets/imports/__init__.py,sha256=iEl-fZZSdcvKaqLgjMES_0RwYn7hZDCMf6BZriCrjgw,64
16
18
  exa_py/websets/imports/client.py,sha256=nJs46hxlSkZm7qjboYHNBuJ62gLmA_Yzr9fc-NDky0Y,6795
17
19
  exa_py/websets/items/__init__.py,sha256=DCWZJVtRmUjnMEkKdb5gW1LT9cHcb-J8lENMnyyBeKU,71
@@ -22,9 +24,9 @@ exa_py/websets/monitors/runs/__init__.py,sha256=TmcETf3zdQouA_vAeLiosCNL1MYJnZ0y
22
24
  exa_py/websets/monitors/runs/client.py,sha256=WnwcWCf7UKk68VCNUp8mRXBtlU8vglTSX-eoWVXzKIw,1229
23
25
  exa_py/websets/searches/__init__.py,sha256=_0Zx8ES5fFTEL3T8mhLxq_xK2t0JONx6ad6AtbvClsE,77
24
26
  exa_py/websets/searches/client.py,sha256=X3f7axWGfecmxf-2tBTX0Yf_--xToz1X8ZHbbudEzy0,1790
25
- exa_py/websets/types.py,sha256=UC41VqvBcUueSqUjxmb0nLb-c7qmScoNkcmdyp9npqY,41736
26
27
  exa_py/websets/webhooks/__init__.py,sha256=iTPBCxFd73z4RifLQMX6iRECx_6pwlI5qscLNjMOUHE,77
27
28
  exa_py/websets/webhooks/client.py,sha256=zsIRMTeJU65yj-zo7Zz-gG02Prtzgcx6utGFSoY4HQQ,4222
28
- exa_py-1.14.10.dist-info/METADATA,sha256=lBJuJsuRLqjtBfgrhrHsTKl8horGLXVT4KJZNMGjm-s,4001
29
- exa_py-1.14.10.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
30
- exa_py-1.14.10.dist-info/RECORD,,
29
+ exa_py-1.14.12.dist-info/METADATA,sha256=sbyg90b0KSXgCm2UwwQBm6M5So9-0eh1eFUm4i8Fa_0,3719
30
+ exa_py-1.14.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
31
+ exa_py-1.14.12.dist-info/top_level.txt,sha256=Mfkmscdw9HWR1PtVhU1gAiVo6DHu_tyiVdb89gfZBVI,7
32
+ exa_py-1.14.12.dist-info/RECORD,,
@@ -1,4 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.1.3
2
+ Generator: setuptools (80.9.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ exa_py
@@ -1,42 +0,0 @@
1
- {% for decorator in decorators -%}
2
- {{ decorator }}
3
- {% endfor -%}
4
- class {{ class_name }}({{ base_class }}):{% if comment is defined %} # {{ comment }}{% endif %}
5
- {%- if description %}
6
- """
7
- {{ description | indent(4) }}
8
- """
9
- {%- endif %}
10
- {%- if not fields and not description %}
11
- pass
12
- {%- endif %}
13
- {%- if config %}
14
- {%- filter indent(4) %}
15
- {%- endfilter %}
16
- {%- endif %}
17
- {%- for field in fields -%}
18
- {%- if field.name == "type" and field.field %}
19
- type: Literal['{{ field.default }}']
20
- {%- elif field.name == "object" and field.field %}
21
- object: Literal['{{ field.default }}']
22
- {%- elif not field.annotated and field.field %}
23
- {{ field.name }}: {{ field.type_hint }} = {{ field.field }}
24
- {%- else %}
25
- {%- if field.annotated %}
26
- {{ field.name }}: {{ field.annotated }}
27
- {%- else %}
28
- {{ field.name }}: {{ field.type_hint }}
29
- {%- endif %}
30
- {%- if not (field.required or (field.represented_default == 'None' and field.strip_default_none)) or field.data_type.is_optional
31
- %} = {{ field.represented_default }}
32
- {%- endif -%}
33
- {%- endif %}
34
- {%- if field.docstring %}
35
- """
36
- {{ field.docstring | indent(4) }}
37
- """
38
- {%- endif %}
39
- {%- for method in methods -%}
40
- {{ method }}
41
- {%- endfor -%}
42
- {%- endfor -%}