llama-cloud 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of llama-cloud might be problematic. See the registry's advisory page for this release for more details.

@@ -71,7 +71,6 @@ class PipelinesClient:
71
71
 
72
72
  client = LlamaCloud(
73
73
  token="YOUR_TOKEN",
74
- base_url="https://yourhost.com/path/to/api",
75
74
  )
76
75
  client.pipelines.search_pipelines(
77
76
  project_name="string",
@@ -121,7 +120,6 @@ class PipelinesClient:
121
120
 
122
121
  client = LlamaCloud(
123
122
  token="YOUR_TOKEN",
124
- base_url="https://yourhost.com/path/to/api",
125
123
  )
126
124
  client.pipelines.create_pipeline(
127
125
  request=PipelineCreate(
@@ -186,7 +184,6 @@ class PipelinesClient:
186
184
 
187
185
  client = LlamaCloud(
188
186
  token="YOUR_TOKEN",
189
- base_url="https://yourhost.com/path/to/api",
190
187
  )
191
188
  client.pipelines.upsert_pipeline(
192
189
  request=PipelineCreate(
@@ -241,7 +238,6 @@ class PipelinesClient:
241
238
 
242
239
  client = LlamaCloud(
243
240
  token="YOUR_TOKEN",
244
- base_url="https://yourhost.com/path/to/api",
245
241
  )
246
242
  client.pipelines.get_pipeline(
247
243
  pipeline_id="string",
@@ -312,7 +308,6 @@ class PipelinesClient:
312
308
 
313
309
  client = LlamaCloud(
314
310
  token="YOUR_TOKEN",
315
- base_url="https://yourhost.com/path/to/api",
316
311
  )
317
312
  client.pipelines.update_existing_pipeline(
318
313
  pipeline_id="string",
@@ -376,7 +371,6 @@ class PipelinesClient:
376
371
 
377
372
  client = LlamaCloud(
378
373
  token="YOUR_TOKEN",
379
- base_url="https://yourhost.com/path/to/api",
380
374
  )
381
375
  client.pipelines.delete_pipeline(
382
376
  pipeline_id="string",
@@ -409,7 +403,6 @@ class PipelinesClient:
409
403
 
410
404
  client = LlamaCloud(
411
405
  token="YOUR_TOKEN",
412
- base_url="https://yourhost.com/path/to/api",
413
406
  )
414
407
  client.pipelines.sync_pipeline(
415
408
  pipeline_id="string",
@@ -444,7 +437,6 @@ class PipelinesClient:
444
437
 
445
438
  client = LlamaCloud(
446
439
  token="YOUR_TOKEN",
447
- base_url="https://yourhost.com/path/to/api",
448
440
  )
449
441
  client.pipelines.get_eval_dataset_executions(
450
442
  pipeline_id="string",
@@ -495,7 +487,6 @@ class PipelinesClient:
495
487
 
496
488
  client = LlamaCloud(
497
489
  token="YOUR_TOKEN",
498
- base_url="https://yourhost.com/path/to/api",
499
490
  )
500
491
  client.pipelines.execute_eval_dataset(
501
492
  pipeline_id="string",
@@ -547,7 +538,6 @@ class PipelinesClient:
547
538
 
548
539
  client = LlamaCloud(
549
540
  token="YOUR_TOKEN",
550
- base_url="https://yourhost.com/path/to/api",
551
541
  )
552
542
  client.pipelines.get_eval_dataset_execution_result(
553
543
  pipeline_id="string",
@@ -590,7 +580,6 @@ class PipelinesClient:
590
580
 
591
581
  client = LlamaCloud(
592
582
  token="YOUR_TOKEN",
593
- base_url="https://yourhost.com/path/to/api",
594
583
  )
595
584
  client.pipelines.get_eval_dataset_execution(
596
585
  pipeline_id="string",
@@ -628,7 +617,6 @@ class PipelinesClient:
628
617
 
629
618
  client = LlamaCloud(
630
619
  token="YOUR_TOKEN",
631
- base_url="https://yourhost.com/path/to/api",
632
620
  )
633
621
  client.pipelines.get_files_for_pipeline(
634
622
  pipeline_id="string",
@@ -665,7 +653,6 @@ class PipelinesClient:
665
653
 
666
654
  client = LlamaCloud(
667
655
  token="YOUR_TOKEN",
668
- base_url="https://yourhost.com/path/to/api",
669
656
  )
670
657
  client.pipelines.add_files_to_pipeline(
671
658
  pipeline_id="string",
@@ -702,7 +689,6 @@ class PipelinesClient:
702
689
 
703
690
  client = LlamaCloud(
704
691
  token="YOUR_TOKEN",
705
- base_url="https://yourhost.com/path/to/api",
706
692
  )
707
693
  client.pipelines.get_pipeline_file_status(
708
694
  pipeline_id="string",
@@ -748,7 +734,6 @@ class PipelinesClient:
748
734
 
749
735
  client = LlamaCloud(
750
736
  token="YOUR_TOKEN",
751
- base_url="https://yourhost.com/path/to/api",
752
737
  )
753
738
  client.pipelines.update_pipeline_file(
754
739
  pipeline_id="string",
@@ -790,7 +775,6 @@ class PipelinesClient:
790
775
 
791
776
  client = LlamaCloud(
792
777
  token="YOUR_TOKEN",
793
- base_url="https://yourhost.com/path/to/api",
794
778
  )
795
779
  client.pipelines.delete_pipeline_file(
796
780
  pipeline_id="string",
@@ -826,7 +810,6 @@ class PipelinesClient:
826
810
 
827
811
  client = LlamaCloud(
828
812
  token="YOUR_TOKEN",
829
- base_url="https://yourhost.com/path/to/api",
830
813
  )
831
814
  client.pipelines.get_pipeline_data_sources(
832
815
  pipeline_id="string",
@@ -865,7 +848,6 @@ class PipelinesClient:
865
848
 
866
849
  client = LlamaCloud(
867
850
  token="YOUR_TOKEN",
868
- base_url="https://yourhost.com/path/to/api",
869
851
  )
870
852
  client.pipelines.add_data_sources_to_pipeline(
871
853
  pipeline_id="string",
@@ -904,7 +886,6 @@ class PipelinesClient:
904
886
 
905
887
  client = LlamaCloud(
906
888
  token="YOUR_TOKEN",
907
- base_url="https://yourhost.com/path/to/api",
908
889
  )
909
890
  client.pipelines.delete_pipeline_data_source(
910
891
  pipeline_id="string",
@@ -943,7 +924,6 @@ class PipelinesClient:
943
924
 
944
925
  client = LlamaCloud(
945
926
  token="YOUR_TOKEN",
946
- base_url="https://yourhost.com/path/to/api",
947
927
  )
948
928
  client.pipelines.sync_pipeline_data_source(
949
929
  pipeline_id="string",
@@ -1006,7 +986,6 @@ class PipelinesClient:
1006
986
 
1007
987
  client = LlamaCloud(
1008
988
  token="YOUR_TOKEN",
1009
- base_url="https://yourhost.com/path/to/api",
1010
989
  )
1011
990
  client.pipelines.run_search(
1012
991
  pipeline_id="string",
@@ -1058,7 +1037,6 @@ class PipelinesClient:
1058
1037
 
1059
1038
  client = LlamaCloud(
1060
1039
  token="YOUR_TOKEN",
1061
- base_url="https://yourhost.com/path/to/api",
1062
1040
  )
1063
1041
  client.pipelines.get_pipeline_jobs(
1064
1042
  pipeline_id="string",
@@ -1093,7 +1071,6 @@ class PipelinesClient:
1093
1071
 
1094
1072
  client = LlamaCloud(
1095
1073
  token="YOUR_TOKEN",
1096
- base_url="https://yourhost.com/path/to/api",
1097
1074
  )
1098
1075
  client.pipelines.get_pipeline_job(
1099
1076
  pipeline_id="string",
@@ -1135,7 +1112,6 @@ class PipelinesClient:
1135
1112
 
1136
1113
  client = LlamaCloud(
1137
1114
  token="YOUR_TOKEN",
1138
- base_url="https://yourhost.com/path/to/api",
1139
1115
  )
1140
1116
  client.pipelines.list_pipeline_documents(
1141
1117
  pipeline_id="string",
@@ -1175,7 +1151,6 @@ class PipelinesClient:
1175
1151
 
1176
1152
  client = LlamaCloud(
1177
1153
  token="YOUR_TOKEN",
1178
- base_url="https://yourhost.com/path/to/api",
1179
1154
  )
1180
1155
  client.pipelines.create_batch_pipeline_documents(
1181
1156
  pipeline_id="string",
@@ -1216,7 +1191,6 @@ class PipelinesClient:
1216
1191
 
1217
1192
  client = LlamaCloud(
1218
1193
  token="YOUR_TOKEN",
1219
- base_url="https://yourhost.com/path/to/api",
1220
1194
  )
1221
1195
  client.pipelines.upsert_batch_pipeline_documents(
1222
1196
  pipeline_id="string",
@@ -1255,7 +1229,6 @@ class PipelinesClient:
1255
1229
 
1256
1230
  client = LlamaCloud(
1257
1231
  token="YOUR_TOKEN",
1258
- base_url="https://yourhost.com/path/to/api",
1259
1232
  )
1260
1233
  client.pipelines.get_pipeline_document(
1261
1234
  pipeline_id="string",
@@ -1293,7 +1266,6 @@ class PipelinesClient:
1293
1266
 
1294
1267
  client = LlamaCloud(
1295
1268
  token="YOUR_TOKEN",
1296
- base_url="https://yourhost.com/path/to/api",
1297
1269
  )
1298
1270
  client.pipelines.delete_pipeline_document(
1299
1271
  pipeline_id="string",
@@ -1331,7 +1303,6 @@ class PipelinesClient:
1331
1303
 
1332
1304
  client = LlamaCloud(
1333
1305
  token="YOUR_TOKEN",
1334
- base_url="https://yourhost.com/path/to/api",
1335
1306
  )
1336
1307
  client.pipelines.get_pipeline_document_status(
1337
1308
  pipeline_id="string",
@@ -1384,7 +1355,6 @@ class AsyncPipelinesClient:
1384
1355
 
1385
1356
  client = AsyncLlamaCloud(
1386
1357
  token="YOUR_TOKEN",
1387
- base_url="https://yourhost.com/path/to/api",
1388
1358
  )
1389
1359
  await client.pipelines.search_pipelines(
1390
1360
  project_name="string",
@@ -1434,7 +1404,6 @@ class AsyncPipelinesClient:
1434
1404
 
1435
1405
  client = AsyncLlamaCloud(
1436
1406
  token="YOUR_TOKEN",
1437
- base_url="https://yourhost.com/path/to/api",
1438
1407
  )
1439
1408
  await client.pipelines.create_pipeline(
1440
1409
  request=PipelineCreate(
@@ -1499,7 +1468,6 @@ class AsyncPipelinesClient:
1499
1468
 
1500
1469
  client = AsyncLlamaCloud(
1501
1470
  token="YOUR_TOKEN",
1502
- base_url="https://yourhost.com/path/to/api",
1503
1471
  )
1504
1472
  await client.pipelines.upsert_pipeline(
1505
1473
  request=PipelineCreate(
@@ -1554,7 +1522,6 @@ class AsyncPipelinesClient:
1554
1522
 
1555
1523
  client = AsyncLlamaCloud(
1556
1524
  token="YOUR_TOKEN",
1557
- base_url="https://yourhost.com/path/to/api",
1558
1525
  )
1559
1526
  await client.pipelines.get_pipeline(
1560
1527
  pipeline_id="string",
@@ -1625,7 +1592,6 @@ class AsyncPipelinesClient:
1625
1592
 
1626
1593
  client = AsyncLlamaCloud(
1627
1594
  token="YOUR_TOKEN",
1628
- base_url="https://yourhost.com/path/to/api",
1629
1595
  )
1630
1596
  await client.pipelines.update_existing_pipeline(
1631
1597
  pipeline_id="string",
@@ -1689,7 +1655,6 @@ class AsyncPipelinesClient:
1689
1655
 
1690
1656
  client = AsyncLlamaCloud(
1691
1657
  token="YOUR_TOKEN",
1692
- base_url="https://yourhost.com/path/to/api",
1693
1658
  )
1694
1659
  await client.pipelines.delete_pipeline(
1695
1660
  pipeline_id="string",
@@ -1722,7 +1687,6 @@ class AsyncPipelinesClient:
1722
1687
 
1723
1688
  client = AsyncLlamaCloud(
1724
1689
  token="YOUR_TOKEN",
1725
- base_url="https://yourhost.com/path/to/api",
1726
1690
  )
1727
1691
  await client.pipelines.sync_pipeline(
1728
1692
  pipeline_id="string",
@@ -1759,7 +1723,6 @@ class AsyncPipelinesClient:
1759
1723
 
1760
1724
  client = AsyncLlamaCloud(
1761
1725
  token="YOUR_TOKEN",
1762
- base_url="https://yourhost.com/path/to/api",
1763
1726
  )
1764
1727
  await client.pipelines.get_eval_dataset_executions(
1765
1728
  pipeline_id="string",
@@ -1810,7 +1773,6 @@ class AsyncPipelinesClient:
1810
1773
 
1811
1774
  client = AsyncLlamaCloud(
1812
1775
  token="YOUR_TOKEN",
1813
- base_url="https://yourhost.com/path/to/api",
1814
1776
  )
1815
1777
  await client.pipelines.execute_eval_dataset(
1816
1778
  pipeline_id="string",
@@ -1862,7 +1824,6 @@ class AsyncPipelinesClient:
1862
1824
 
1863
1825
  client = AsyncLlamaCloud(
1864
1826
  token="YOUR_TOKEN",
1865
- base_url="https://yourhost.com/path/to/api",
1866
1827
  )
1867
1828
  await client.pipelines.get_eval_dataset_execution_result(
1868
1829
  pipeline_id="string",
@@ -1905,7 +1866,6 @@ class AsyncPipelinesClient:
1905
1866
 
1906
1867
  client = AsyncLlamaCloud(
1907
1868
  token="YOUR_TOKEN",
1908
- base_url="https://yourhost.com/path/to/api",
1909
1869
  )
1910
1870
  await client.pipelines.get_eval_dataset_execution(
1911
1871
  pipeline_id="string",
@@ -1943,7 +1903,6 @@ class AsyncPipelinesClient:
1943
1903
 
1944
1904
  client = AsyncLlamaCloud(
1945
1905
  token="YOUR_TOKEN",
1946
- base_url="https://yourhost.com/path/to/api",
1947
1906
  )
1948
1907
  await client.pipelines.get_files_for_pipeline(
1949
1908
  pipeline_id="string",
@@ -1980,7 +1939,6 @@ class AsyncPipelinesClient:
1980
1939
 
1981
1940
  client = AsyncLlamaCloud(
1982
1941
  token="YOUR_TOKEN",
1983
- base_url="https://yourhost.com/path/to/api",
1984
1942
  )
1985
1943
  await client.pipelines.add_files_to_pipeline(
1986
1944
  pipeline_id="string",
@@ -2017,7 +1975,6 @@ class AsyncPipelinesClient:
2017
1975
 
2018
1976
  client = AsyncLlamaCloud(
2019
1977
  token="YOUR_TOKEN",
2020
- base_url="https://yourhost.com/path/to/api",
2021
1978
  )
2022
1979
  await client.pipelines.get_pipeline_file_status(
2023
1980
  pipeline_id="string",
@@ -2063,7 +2020,6 @@ class AsyncPipelinesClient:
2063
2020
 
2064
2021
  client = AsyncLlamaCloud(
2065
2022
  token="YOUR_TOKEN",
2066
- base_url="https://yourhost.com/path/to/api",
2067
2023
  )
2068
2024
  await client.pipelines.update_pipeline_file(
2069
2025
  pipeline_id="string",
@@ -2105,7 +2061,6 @@ class AsyncPipelinesClient:
2105
2061
 
2106
2062
  client = AsyncLlamaCloud(
2107
2063
  token="YOUR_TOKEN",
2108
- base_url="https://yourhost.com/path/to/api",
2109
2064
  )
2110
2065
  await client.pipelines.delete_pipeline_file(
2111
2066
  pipeline_id="string",
@@ -2141,7 +2096,6 @@ class AsyncPipelinesClient:
2141
2096
 
2142
2097
  client = AsyncLlamaCloud(
2143
2098
  token="YOUR_TOKEN",
2144
- base_url="https://yourhost.com/path/to/api",
2145
2099
  )
2146
2100
  await client.pipelines.get_pipeline_data_sources(
2147
2101
  pipeline_id="string",
@@ -2180,7 +2134,6 @@ class AsyncPipelinesClient:
2180
2134
 
2181
2135
  client = AsyncLlamaCloud(
2182
2136
  token="YOUR_TOKEN",
2183
- base_url="https://yourhost.com/path/to/api",
2184
2137
  )
2185
2138
  await client.pipelines.add_data_sources_to_pipeline(
2186
2139
  pipeline_id="string",
@@ -2219,7 +2172,6 @@ class AsyncPipelinesClient:
2219
2172
 
2220
2173
  client = AsyncLlamaCloud(
2221
2174
  token="YOUR_TOKEN",
2222
- base_url="https://yourhost.com/path/to/api",
2223
2175
  )
2224
2176
  await client.pipelines.delete_pipeline_data_source(
2225
2177
  pipeline_id="string",
@@ -2258,7 +2210,6 @@ class AsyncPipelinesClient:
2258
2210
 
2259
2211
  client = AsyncLlamaCloud(
2260
2212
  token="YOUR_TOKEN",
2261
- base_url="https://yourhost.com/path/to/api",
2262
2213
  )
2263
2214
  await client.pipelines.sync_pipeline_data_source(
2264
2215
  pipeline_id="string",
@@ -2321,7 +2272,6 @@ class AsyncPipelinesClient:
2321
2272
 
2322
2273
  client = AsyncLlamaCloud(
2323
2274
  token="YOUR_TOKEN",
2324
- base_url="https://yourhost.com/path/to/api",
2325
2275
  )
2326
2276
  await client.pipelines.run_search(
2327
2277
  pipeline_id="string",
@@ -2373,7 +2323,6 @@ class AsyncPipelinesClient:
2373
2323
 
2374
2324
  client = AsyncLlamaCloud(
2375
2325
  token="YOUR_TOKEN",
2376
- base_url="https://yourhost.com/path/to/api",
2377
2326
  )
2378
2327
  await client.pipelines.get_pipeline_jobs(
2379
2328
  pipeline_id="string",
@@ -2408,7 +2357,6 @@ class AsyncPipelinesClient:
2408
2357
 
2409
2358
  client = AsyncLlamaCloud(
2410
2359
  token="YOUR_TOKEN",
2411
- base_url="https://yourhost.com/path/to/api",
2412
2360
  )
2413
2361
  await client.pipelines.get_pipeline_job(
2414
2362
  pipeline_id="string",
@@ -2450,7 +2398,6 @@ class AsyncPipelinesClient:
2450
2398
 
2451
2399
  client = AsyncLlamaCloud(
2452
2400
  token="YOUR_TOKEN",
2453
- base_url="https://yourhost.com/path/to/api",
2454
2401
  )
2455
2402
  await client.pipelines.list_pipeline_documents(
2456
2403
  pipeline_id="string",
@@ -2490,7 +2437,6 @@ class AsyncPipelinesClient:
2490
2437
 
2491
2438
  client = AsyncLlamaCloud(
2492
2439
  token="YOUR_TOKEN",
2493
- base_url="https://yourhost.com/path/to/api",
2494
2440
  )
2495
2441
  await client.pipelines.create_batch_pipeline_documents(
2496
2442
  pipeline_id="string",
@@ -2531,7 +2477,6 @@ class AsyncPipelinesClient:
2531
2477
 
2532
2478
  client = AsyncLlamaCloud(
2533
2479
  token="YOUR_TOKEN",
2534
- base_url="https://yourhost.com/path/to/api",
2535
2480
  )
2536
2481
  await client.pipelines.upsert_batch_pipeline_documents(
2537
2482
  pipeline_id="string",
@@ -2570,7 +2515,6 @@ class AsyncPipelinesClient:
2570
2515
 
2571
2516
  client = AsyncLlamaCloud(
2572
2517
  token="YOUR_TOKEN",
2573
- base_url="https://yourhost.com/path/to/api",
2574
2518
  )
2575
2519
  await client.pipelines.get_pipeline_document(
2576
2520
  pipeline_id="string",
@@ -2608,7 +2552,6 @@ class AsyncPipelinesClient:
2608
2552
 
2609
2553
  client = AsyncLlamaCloud(
2610
2554
  token="YOUR_TOKEN",
2611
- base_url="https://yourhost.com/path/to/api",
2612
2555
  )
2613
2556
  await client.pipelines.delete_pipeline_document(
2614
2557
  pipeline_id="string",
@@ -2646,7 +2589,6 @@ class AsyncPipelinesClient:
2646
2589
 
2647
2590
  client = AsyncLlamaCloud(
2648
2591
  token="YOUR_TOKEN",
2649
- base_url="https://yourhost.com/path/to/api",
2650
2592
  )
2651
2593
  await client.pipelines.get_pipeline_document_status(
2652
2594
  pipeline_id="string",
@@ -45,7 +45,6 @@ class ProjectsClient:
45
45
 
46
46
  client = LlamaCloud(
47
47
  token="YOUR_TOKEN",
48
- base_url="https://yourhost.com/path/to/api",
49
48
  )
50
49
  client.projects.list_projects()
51
50
  """
@@ -78,7 +77,6 @@ class ProjectsClient:
78
77
 
79
78
  client = LlamaCloud(
80
79
  token="YOUR_TOKEN",
81
- base_url="https://yourhost.com/path/to/api",
82
80
  )
83
81
  client.projects.create_project(
84
82
  request=ProjectCreate(
@@ -116,7 +114,6 @@ class ProjectsClient:
116
114
 
117
115
  client = LlamaCloud(
118
116
  token="YOUR_TOKEN",
119
- base_url="https://yourhost.com/path/to/api",
120
117
  )
121
118
  client.projects.upsert_project(
122
119
  request=ProjectCreate(
@@ -152,7 +149,6 @@ class ProjectsClient:
152
149
 
153
150
  client = LlamaCloud(
154
151
  token="YOUR_TOKEN",
155
- base_url="https://yourhost.com/path/to/api",
156
152
  )
157
153
  client.projects.get_project(
158
154
  project_id="string",
@@ -187,7 +183,6 @@ class ProjectsClient:
187
183
 
188
184
  client = LlamaCloud(
189
185
  token="YOUR_TOKEN",
190
- base_url="https://yourhost.com/path/to/api",
191
186
  )
192
187
  client.projects.update_existing_project(
193
188
  project_id="string",
@@ -222,7 +217,6 @@ class ProjectsClient:
222
217
 
223
218
  client = LlamaCloud(
224
219
  token="YOUR_TOKEN",
225
- base_url="https://yourhost.com/path/to/api",
226
220
  )
227
221
  client.projects.delete_project(
228
222
  project_id="string",
@@ -255,7 +249,6 @@ class ProjectsClient:
255
249
 
256
250
  client = LlamaCloud(
257
251
  token="YOUR_TOKEN",
258
- base_url="https://yourhost.com/path/to/api",
259
252
  )
260
253
  client.projects.get_datasets_for_project(
261
254
  project_id="string",
@@ -292,7 +285,6 @@ class ProjectsClient:
292
285
 
293
286
  client = LlamaCloud(
294
287
  token="YOUR_TOKEN",
295
- base_url="https://yourhost.com/path/to/api",
296
288
  )
297
289
  client.projects.create_eval_dataset_for_project(
298
290
  project_id="string",
@@ -335,7 +327,6 @@ class ProjectsClient:
335
327
 
336
328
  client = LlamaCloud(
337
329
  token="YOUR_TOKEN",
338
- base_url="https://yourhost.com/path/to/api",
339
330
  )
340
331
  client.projects.create_local_eval_set_for_project(
341
332
  project_id="string",
@@ -373,7 +364,6 @@ class ProjectsClient:
373
364
 
374
365
  client = LlamaCloud(
375
366
  token="YOUR_TOKEN",
376
- base_url="https://yourhost.com/path/to/api",
377
367
  )
378
368
  client.projects.get_local_evals_for_project(
379
369
  project_id="string",
@@ -406,7 +396,6 @@ class ProjectsClient:
406
396
 
407
397
  client = LlamaCloud(
408
398
  token="YOUR_TOKEN",
409
- base_url="https://yourhost.com/path/to/api",
410
399
  )
411
400
  client.projects.get_local_eval_sets_for_project(
412
401
  project_id="string",
@@ -443,7 +432,6 @@ class ProjectsClient:
443
432
 
444
433
  client = LlamaCloud(
445
434
  token="YOUR_TOKEN",
446
- base_url="https://yourhost.com/path/to/api",
447
435
  )
448
436
  client.projects.delete_local_eval_set(
449
437
  project_id="string",
@@ -480,7 +468,6 @@ class ProjectsClient:
480
468
 
481
469
  client = LlamaCloud(
482
470
  token="YOUR_TOKEN",
483
- base_url="https://yourhost.com/path/to/api",
484
471
  )
485
472
  client.projects.get_promptmixin_prompts(
486
473
  project_id="string",
@@ -516,7 +503,6 @@ class ProjectsClient:
516
503
 
517
504
  client = LlamaCloud(
518
505
  token="YOUR_TOKEN",
519
- base_url="https://yourhost.com/path/to/api",
520
506
  )
521
507
  client.projects.create_prompt_mixin_prompts(
522
508
  project_id="string",
@@ -562,7 +548,6 @@ class ProjectsClient:
562
548
 
563
549
  client = LlamaCloud(
564
550
  token="YOUR_TOKEN",
565
- base_url="https://yourhost.com/path/to/api",
566
551
  )
567
552
  client.projects.update_promptmixin_prompts(
568
553
  project_id="string",
@@ -606,7 +591,6 @@ class ProjectsClient:
606
591
 
607
592
  client = LlamaCloud(
608
593
  token="YOUR_TOKEN",
609
- base_url="https://yourhost.com/path/to/api",
610
594
  )
611
595
  client.projects.delete_prompt_mixin_prompts(
612
596
  project_id="string",
@@ -647,7 +631,6 @@ class AsyncProjectsClient:
647
631
 
648
632
  client = AsyncLlamaCloud(
649
633
  token="YOUR_TOKEN",
650
- base_url="https://yourhost.com/path/to/api",
651
634
  )
652
635
  await client.projects.list_projects()
653
636
  """
@@ -680,7 +663,6 @@ class AsyncProjectsClient:
680
663
 
681
664
  client = AsyncLlamaCloud(
682
665
  token="YOUR_TOKEN",
683
- base_url="https://yourhost.com/path/to/api",
684
666
  )
685
667
  await client.projects.create_project(
686
668
  request=ProjectCreate(
@@ -718,7 +700,6 @@ class AsyncProjectsClient:
718
700
 
719
701
  client = AsyncLlamaCloud(
720
702
  token="YOUR_TOKEN",
721
- base_url="https://yourhost.com/path/to/api",
722
703
  )
723
704
  await client.projects.upsert_project(
724
705
  request=ProjectCreate(
@@ -754,7 +735,6 @@ class AsyncProjectsClient:
754
735
 
755
736
  client = AsyncLlamaCloud(
756
737
  token="YOUR_TOKEN",
757
- base_url="https://yourhost.com/path/to/api",
758
738
  )
759
739
  await client.projects.get_project(
760
740
  project_id="string",
@@ -789,7 +769,6 @@ class AsyncProjectsClient:
789
769
 
790
770
  client = AsyncLlamaCloud(
791
771
  token="YOUR_TOKEN",
792
- base_url="https://yourhost.com/path/to/api",
793
772
  )
794
773
  await client.projects.update_existing_project(
795
774
  project_id="string",
@@ -824,7 +803,6 @@ class AsyncProjectsClient:
824
803
 
825
804
  client = AsyncLlamaCloud(
826
805
  token="YOUR_TOKEN",
827
- base_url="https://yourhost.com/path/to/api",
828
806
  )
829
807
  await client.projects.delete_project(
830
808
  project_id="string",
@@ -857,7 +835,6 @@ class AsyncProjectsClient:
857
835
 
858
836
  client = AsyncLlamaCloud(
859
837
  token="YOUR_TOKEN",
860
- base_url="https://yourhost.com/path/to/api",
861
838
  )
862
839
  await client.projects.get_datasets_for_project(
863
840
  project_id="string",
@@ -894,7 +871,6 @@ class AsyncProjectsClient:
894
871
 
895
872
  client = AsyncLlamaCloud(
896
873
  token="YOUR_TOKEN",
897
- base_url="https://yourhost.com/path/to/api",
898
874
  )
899
875
  await client.projects.create_eval_dataset_for_project(
900
876
  project_id="string",
@@ -937,7 +913,6 @@ class AsyncProjectsClient:
937
913
 
938
914
  client = AsyncLlamaCloud(
939
915
  token="YOUR_TOKEN",
940
- base_url="https://yourhost.com/path/to/api",
941
916
  )
942
917
  await client.projects.create_local_eval_set_for_project(
943
918
  project_id="string",
@@ -975,7 +950,6 @@ class AsyncProjectsClient:
975
950
 
976
951
  client = AsyncLlamaCloud(
977
952
  token="YOUR_TOKEN",
978
- base_url="https://yourhost.com/path/to/api",
979
953
  )
980
954
  await client.projects.get_local_evals_for_project(
981
955
  project_id="string",
@@ -1008,7 +982,6 @@ class AsyncProjectsClient:
1008
982
 
1009
983
  client = AsyncLlamaCloud(
1010
984
  token="YOUR_TOKEN",
1011
- base_url="https://yourhost.com/path/to/api",
1012
985
  )
1013
986
  await client.projects.get_local_eval_sets_for_project(
1014
987
  project_id="string",
@@ -1045,7 +1018,6 @@ class AsyncProjectsClient:
1045
1018
 
1046
1019
  client = AsyncLlamaCloud(
1047
1020
  token="YOUR_TOKEN",
1048
- base_url="https://yourhost.com/path/to/api",
1049
1021
  )
1050
1022
  await client.projects.delete_local_eval_set(
1051
1023
  project_id="string",
@@ -1082,7 +1054,6 @@ class AsyncProjectsClient:
1082
1054
 
1083
1055
  client = AsyncLlamaCloud(
1084
1056
  token="YOUR_TOKEN",
1085
- base_url="https://yourhost.com/path/to/api",
1086
1057
  )
1087
1058
  await client.projects.get_promptmixin_prompts(
1088
1059
  project_id="string",
@@ -1118,7 +1089,6 @@ class AsyncProjectsClient:
1118
1089
 
1119
1090
  client = AsyncLlamaCloud(
1120
1091
  token="YOUR_TOKEN",
1121
- base_url="https://yourhost.com/path/to/api",
1122
1092
  )
1123
1093
  await client.projects.create_prompt_mixin_prompts(
1124
1094
  project_id="string",
@@ -1164,7 +1134,6 @@ class AsyncProjectsClient:
1164
1134
 
1165
1135
  client = AsyncLlamaCloud(
1166
1136
  token="YOUR_TOKEN",
1167
- base_url="https://yourhost.com/path/to/api",
1168
1137
  )
1169
1138
  await client.projects.update_promptmixin_prompts(
1170
1139
  project_id="string",
@@ -1208,7 +1177,6 @@ class AsyncProjectsClient:
1208
1177
 
1209
1178
  client = AsyncLlamaCloud(
1210
1179
  token="YOUR_TOKEN",
1211
- base_url="https://yourhost.com/path/to/api",
1212
1180
  )
1213
1181
  await client.projects.delete_prompt_mixin_prompts(
1214
1182
  project_id="string",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-cloud
3
- Version: 0.0.3
3
+ Version: 0.0.4
4
4
  Summary:
5
5
  Author: Logan Markewich
6
6
  Author-email: logan@runllama.ai