llama-cloud 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release

This version of llama-cloud might be problematic; see the package's registry page for details.

llama_cloud/__init__.py CHANGED
@@ -147,6 +147,7 @@ from .resources import (
     pipelines,
     projects,
 )
+from .environment import LlamaCloudEnvironment
 
 __all__ = [
     "ApiKey",
@@ -218,6 +219,7 @@ __all__ = [
     "HuggingFaceInferenceApiEmbedding",
     "HuggingFaceInferenceApiEmbeddingToken",
     "JsonNodeParser",
+    "LlamaCloudEnvironment",
     "LlamaParseSupportedFileExtensions",
     "Llm",
     "LocalEval",
llama_cloud/client.py CHANGED
@@ -5,6 +5,7 @@ import typing
 import httpx
 
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .environment import LlamaCloudEnvironment
 from .resources.api_keys.client import ApiKeysClient, AsyncApiKeysClient
 from .resources.billing.client import AsyncBillingClient, BillingClient
 from .resources.component_definitions.client import AsyncComponentDefinitionsClient, ComponentDefinitionsClient
@@ -22,13 +23,14 @@ class LlamaCloud:
     def __init__(
         self,
         *,
-        base_url: str,
+        base_url: typing.Optional[str] = None,
+        environment: LlamaCloudEnvironment = LlamaCloudEnvironment.DEFAULT,
         token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
         timeout: typing.Optional[float] = 60,
         httpx_client: typing.Optional[httpx.Client] = None
     ):
         self._client_wrapper = SyncClientWrapper(
-            base_url=base_url,
+            base_url=_get_base_url(base_url=base_url, environment=environment),
             token=token,
             httpx_client=httpx.Client(timeout=timeout) if httpx_client is None else httpx_client,
         )
@@ -49,13 +51,14 @@ class AsyncLlamaCloud:
     def __init__(
         self,
         *,
-        base_url: str,
+        base_url: typing.Optional[str] = None,
+        environment: LlamaCloudEnvironment = LlamaCloudEnvironment.DEFAULT,
         token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
         timeout: typing.Optional[float] = 60,
         httpx_client: typing.Optional[httpx.AsyncClient] = None
     ):
         self._client_wrapper = AsyncClientWrapper(
-            base_url=base_url,
+            base_url=_get_base_url(base_url=base_url, environment=environment),
             token=token,
             httpx_client=httpx.AsyncClient(timeout=timeout) if httpx_client is None else httpx_client,
         )
@@ -70,3 +73,12 @@ class AsyncLlamaCloud:
         self.component_definitions = AsyncComponentDefinitionsClient(client_wrapper=self._client_wrapper)
         self.billing = AsyncBillingClient(client_wrapper=self._client_wrapper)
         self.deprecated = AsyncDeprecatedClient(client_wrapper=self._client_wrapper)
+
+
+def _get_base_url(*, base_url: typing.Optional[str] = None, environment: LlamaCloudEnvironment) -> str:
+    if base_url is not None:
+        return base_url
+    elif environment is not None:
+        return environment.value
+    else:
+        raise Exception("Please pass in either base_url or environment to construct the client")
llama_cloud/environment.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+
+
+class LlamaCloudEnvironment(enum.Enum):
+    DEFAULT = "https://api.cloud.llamaindex.ai/"
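
Taken together, these changes make base_url optional: the new module-level _get_base_url helper prefers an explicit base_url, falls back to environment.value, and only raises if both are None. A minimal usage sketch of the updated constructors, using only names shown in the diff (the token and URL values are placeholders):

from llama_cloud import LlamaCloudEnvironment
from llama_cloud.client import AsyncLlamaCloud, LlamaCloud

# 0.0.4 defaults to LlamaCloudEnvironment.DEFAULT, i.e. https://api.cloud.llamaindex.ai/
client = LlamaCloud(token="YOUR_TOKEN")

# Passing the environment explicitly is equivalent to the default above.
client = LlamaCloud(token="YOUR_TOKEN", environment=LlamaCloudEnvironment.DEFAULT)

# An explicit base_url still takes precedence, so self-hosted or proxied
# deployments keep working as they did in 0.0.3.
client = LlamaCloud(token="YOUR_TOKEN", base_url="https://yourhost.com/path/to/api")

# The async client gains the same signature.
async_client = AsyncLlamaCloud(token="YOUR_TOKEN")
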
llama_cloud/resources/api_keys/client.py CHANGED
@@ -36,7 +36,6 @@ class ApiKeysClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.api_keys.get_keys()
         """
@@ -67,7 +66,6 @@ class ApiKeysClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.api_keys.generate_key()
         """
@@ -104,7 +102,6 @@ class ApiKeysClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.api_keys.update_existing_api_key(
             api_key_id="string",
@@ -141,7 +138,6 @@ class ApiKeysClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.api_keys.delete_api_key(
             api_key_id="string",
@@ -177,7 +173,6 @@ class AsyncApiKeysClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.api_keys.get_keys()
         """
@@ -208,7 +203,6 @@ class AsyncApiKeysClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.api_keys.generate_key()
         """
@@ -245,7 +239,6 @@ class AsyncApiKeysClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.api_keys.update_existing_api_key(
             api_key_id="string",
@@ -282,7 +275,6 @@ class AsyncApiKeysClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.api_keys.delete_api_key(
             api_key_id="string",
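
The remaining hunks are mechanical: every usage example in the resource-client docstrings drops the base_url line, since the default environment now supplies it. A rough sketch mirroring the updated api_keys examples above (token and api_key_id values are placeholders):

import asyncio

from llama_cloud.client import AsyncLlamaCloud, LlamaCloud

# Synchronous client, constructed without base_url.
client = LlamaCloud(token="YOUR_TOKEN")
client.api_keys.get_keys()
client.api_keys.generate_key()


# Asynchronous client, the same operations awaited.
async def main() -> None:
    async_client = AsyncLlamaCloud(token="YOUR_TOKEN")
    await async_client.api_keys.get_keys()
    await async_client.api_keys.delete_api_key(api_key_id="string")


asyncio.run(main())
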
llama_cloud/resources/billing/client.py CHANGED
@@ -40,7 +40,6 @@ class BillingClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.billing.create_checkout_session(
             success_url="string",
@@ -75,7 +74,6 @@ class BillingClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.billing.create_customer_portal_session(
             return_url="string",
@@ -109,7 +107,6 @@ class BillingClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.billing.stripe_webhook()
         """
@@ -147,7 +144,6 @@ class AsyncBillingClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.billing.create_checkout_session(
             success_url="string",
@@ -182,7 +178,6 @@ class AsyncBillingClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.billing.create_customer_portal_session(
             return_url="string",
@@ -216,7 +211,6 @@ class AsyncBillingClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.billing.stripe_webhook()
         """
llama_cloud/resources/component_definitions/client.py CHANGED
@@ -32,7 +32,6 @@ class ComponentDefinitionsClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.component_definitions.get_all_transformation_definitions()
         """
@@ -61,7 +60,6 @@ class ComponentDefinitionsClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.component_definitions.get_all_data_source_definitions()
         """
@@ -88,7 +86,6 @@ class ComponentDefinitionsClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.component_definitions.get_all_data_sink_definitions()
         """
@@ -120,7 +117,6 @@ class AsyncComponentDefinitionsClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.component_definitions.get_all_transformation_definitions()
         """
@@ -149,7 +145,6 @@ class AsyncComponentDefinitionsClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.component_definitions.get_all_data_source_definitions()
         """
@@ -176,7 +171,6 @@ class AsyncComponentDefinitionsClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.component_definitions.get_all_data_sink_definitions()
         """
llama_cloud/resources/data_sinks/client.py CHANGED
@@ -43,7 +43,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.list_data_sinks()
         """
@@ -78,7 +77,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.create_data_sink(
             request=DataSinkCreate(
@@ -120,7 +118,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.upsert_data_sink(
             request=DataSinkCreate(
@@ -158,7 +155,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.get_data_sink(
             data_sink_id="string",
@@ -205,7 +201,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.update_data_sink(
             data_sink_id="string",
@@ -245,7 +240,6 @@ class DataSinksClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sinks.delete_data_sink(
             data_sink_id="string",
@@ -284,7 +278,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.list_data_sinks()
         """
@@ -319,7 +312,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.create_data_sink(
             request=DataSinkCreate(
@@ -361,7 +353,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.upsert_data_sink(
             request=DataSinkCreate(
@@ -399,7 +390,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.get_data_sink(
             data_sink_id="string",
@@ -446,7 +436,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.update_data_sink(
             data_sink_id="string",
@@ -486,7 +475,6 @@ class AsyncDataSinksClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sinks.delete_data_sink(
             data_sink_id="string",
llama_cloud/resources/data_sources/client.py CHANGED
@@ -44,7 +44,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.list_data_sources()
         """
@@ -79,7 +78,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.create_data_source(
             request=DataSourceCreate(
@@ -121,7 +119,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.upsert_data_source(
             request=DataSourceCreate(
@@ -159,7 +156,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.get_data_source(
             data_source_id="string",
@@ -209,7 +205,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.update_data_source(
             data_source_id="string",
@@ -251,7 +246,6 @@ class DataSourcesClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.data_sources.delete_data_source(
             data_source_id="string",
@@ -290,7 +284,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.list_data_sources()
         """
@@ -327,7 +320,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.create_data_source(
             request=DataSourceCreate(
@@ -371,7 +363,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.upsert_data_source(
             request=DataSourceCreate(
@@ -409,7 +400,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.get_data_source(
             data_source_id="string",
@@ -459,7 +449,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.update_data_source(
             data_source_id="string",
@@ -501,7 +490,6 @@ class AsyncDataSourcesClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.data_sources.delete_data_source(
             data_source_id="string",
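
The data_sinks and data_sources docstrings above follow the same pattern; their create and upsert examples pass a request model (DataSinkCreate / DataSourceCreate) whose fields are truncated in this diff, so only the by-id calls are sketched here, assuming the id is their only required argument (ids are placeholders):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# By-id operations shown in the docstrings above; "string" stands in for a real id.
sink = client.data_sinks.get_data_sink(data_sink_id="string")
client.data_sinks.delete_data_sink(data_sink_id="string")

source = client.data_sources.get_data_source(data_source_id="string")
client.data_sources.delete_data_source(data_source_id="string")
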
llama_cloud/resources/deprecated/client.py CHANGED
@@ -48,7 +48,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_image_result(
             job_id="string",
@@ -82,7 +81,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_supported_file_extensions()
         """
@@ -181,7 +179,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.usage()
         """
@@ -212,7 +209,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job(
             job_id="string",
@@ -245,7 +241,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_text_result(
             job_id="string",
@@ -278,7 +273,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_raw_text_result(
             job_id="string",
@@ -313,7 +307,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_result(
             job_id="string",
@@ -348,7 +341,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_raw_md_result(
             job_id="string",
@@ -383,7 +375,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_json_result(
             job_id="string",
@@ -416,7 +407,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_job_json_raw_result(
             job_id="string",
@@ -449,7 +439,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.get_parsing_history_result()
         """
@@ -482,7 +471,6 @@ class DeprecatedClient:
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         client.deprecated.generate_presigned_url(
             job_id="string",
@@ -525,7 +513,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_image_result(
             job_id="string",
@@ -559,7 +546,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_supported_file_extensions()
         """
@@ -658,7 +644,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.usage()
         """
@@ -689,7 +674,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job(
             job_id="string",
@@ -722,7 +706,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_text_result(
             job_id="string",
@@ -755,7 +738,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_raw_text_result(
             job_id="string",
@@ -790,7 +772,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_result(
             job_id="string",
@@ -825,7 +806,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_raw_md_result(
             job_id="string",
@@ -860,7 +840,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_json_result(
             job_id="string",
@@ -893,7 +872,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_job_json_raw_result(
             job_id="string",
@@ -926,7 +904,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.get_parsing_history_result()
         """
@@ -959,7 +936,6 @@ class AsyncDeprecatedClient:
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
-            base_url="https://yourhost.com/path/to/api",
         )
         await client.deprecated.generate_presigned_url(
             job_id="string",
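
Finally, the constructor diff above also shows that the timeout and httpx_client options are unchanged, so an explicit base_url combined with a custom HTTP client still bypasses LlamaCloudEnvironment entirely. A rough sketch using calls from the deprecated-client docstrings (URL and job_id values are placeholders):

import httpx

from llama_cloud.client import LlamaCloud

# An explicit base_url wins over the environment (see _get_base_url above).
client = LlamaCloud(
    token="YOUR_TOKEN",
    base_url="https://yourhost.com/path/to/api",  # placeholder from the old docstrings
    timeout=120,  # used only when the client builds its own httpx.Client; the default is 60
)
client.deprecated.get_job(job_id="string")

# Alternatively, supply a preconfigured httpx.Client; the timeout argument is then ignored.
client_with_custom_http = LlamaCloud(
    token="YOUR_TOKEN",
    httpx_client=httpx.Client(timeout=httpx.Timeout(120.0)),
)
client_with_custom_http.deprecated.get_parsing_history_result()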