llama-cloud 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of llama-cloud might be problematic; see the registry's advisory page for details.

llama_cloud/__init__.py CHANGED
@@ -1,7 +1,6 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
3
  from .types import (
4
- ApiKey,
5
4
  AzureOpenAiEmbedding,
6
5
  Base,
7
6
  BasePromptTemplate,
@@ -135,21 +134,18 @@ from .resources import (
135
134
  DataSourceUpdateCustomMetadataValue,
136
135
  FileCreateResourceInfoValue,
137
136
  PipelineFileUpdateCustomMetadataValue,
138
- api_keys,
139
- billing,
140
137
  component_definitions,
141
138
  data_sinks,
142
139
  data_sources,
143
- deprecated,
144
140
  evals,
145
141
  files,
146
142
  parsing,
147
143
  pipelines,
148
144
  projects,
149
145
  )
146
+ from .environment import LlamaCloudEnvironment
150
147
 
151
148
  __all__ = [
152
- "ApiKey",
153
149
  "AzureOpenAiEmbedding",
154
150
  "Base",
155
151
  "BasePromptTemplate",
@@ -218,6 +214,7 @@ __all__ = [
218
214
  "HuggingFaceInferenceApiEmbedding",
219
215
  "HuggingFaceInferenceApiEmbeddingToken",
220
216
  "JsonNodeParser",
217
+ "LlamaCloudEnvironment",
221
218
  "LlamaParseSupportedFileExtensions",
222
219
  "Llm",
223
220
  "LocalEval",
@@ -281,12 +278,9 @@ __all__ = [
281
278
  "UnprocessableEntityError",
282
279
  "ValidationError",
283
280
  "ValidationErrorLocItem",
284
- "api_keys",
285
- "billing",
286
281
  "component_definitions",
287
282
  "data_sinks",
288
283
  "data_sources",
289
- "deprecated",
290
284
  "evals",
291
285
  "files",
292
286
  "parsing",
llama_cloud/client.py CHANGED
@@ -5,12 +5,10 @@ import typing
5
5
  import httpx
6
6
 
7
7
  from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
8
- from .resources.api_keys.client import ApiKeysClient, AsyncApiKeysClient
9
- from .resources.billing.client import AsyncBillingClient, BillingClient
8
+ from .environment import LlamaCloudEnvironment
10
9
  from .resources.component_definitions.client import AsyncComponentDefinitionsClient, ComponentDefinitionsClient
11
10
  from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
12
11
  from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
13
- from .resources.deprecated.client import AsyncDeprecatedClient, DeprecatedClient
14
12
  from .resources.evals.client import AsyncEvalsClient, EvalsClient
15
13
  from .resources.files.client import AsyncFilesClient, FilesClient
16
14
  from .resources.parsing.client import AsyncParsingClient, ParsingClient
@@ -22,17 +20,17 @@ class LlamaCloud:
22
20
  def __init__(
23
21
  self,
24
22
  *,
25
- base_url: str,
23
+ base_url: typing.Optional[str] = None,
24
+ environment: LlamaCloudEnvironment = LlamaCloudEnvironment.DEFAULT,
26
25
  token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
27
26
  timeout: typing.Optional[float] = 60,
28
27
  httpx_client: typing.Optional[httpx.Client] = None
29
28
  ):
30
29
  self._client_wrapper = SyncClientWrapper(
31
- base_url=base_url,
30
+ base_url=_get_base_url(base_url=base_url, environment=environment),
32
31
  token=token,
33
32
  httpx_client=httpx.Client(timeout=timeout) if httpx_client is None else httpx_client,
34
33
  )
35
- self.api_keys = ApiKeysClient(client_wrapper=self._client_wrapper)
36
34
  self.data_sinks = DataSinksClient(client_wrapper=self._client_wrapper)
37
35
  self.data_sources = DataSourcesClient(client_wrapper=self._client_wrapper)
38
36
  self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
@@ -41,25 +39,23 @@ class LlamaCloud:
41
39
  self.evals = EvalsClient(client_wrapper=self._client_wrapper)
42
40
  self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
43
41
  self.component_definitions = ComponentDefinitionsClient(client_wrapper=self._client_wrapper)
44
- self.billing = BillingClient(client_wrapper=self._client_wrapper)
45
- self.deprecated = DeprecatedClient(client_wrapper=self._client_wrapper)
46
42
 
47
43
 
48
44
  class AsyncLlamaCloud:
49
45
  def __init__(
50
46
  self,
51
47
  *,
52
- base_url: str,
48
+ base_url: typing.Optional[str] = None,
49
+ environment: LlamaCloudEnvironment = LlamaCloudEnvironment.DEFAULT,
53
50
  token: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = None,
54
51
  timeout: typing.Optional[float] = 60,
55
52
  httpx_client: typing.Optional[httpx.AsyncClient] = None
56
53
  ):
57
54
  self._client_wrapper = AsyncClientWrapper(
58
- base_url=base_url,
55
+ base_url=_get_base_url(base_url=base_url, environment=environment),
59
56
  token=token,
60
57
  httpx_client=httpx.AsyncClient(timeout=timeout) if httpx_client is None else httpx_client,
61
58
  )
62
- self.api_keys = AsyncApiKeysClient(client_wrapper=self._client_wrapper)
63
59
  self.data_sinks = AsyncDataSinksClient(client_wrapper=self._client_wrapper)
64
60
  self.data_sources = AsyncDataSourcesClient(client_wrapper=self._client_wrapper)
65
61
  self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
@@ -68,5 +64,12 @@ class AsyncLlamaCloud:
68
64
  self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
69
65
  self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
70
66
  self.component_definitions = AsyncComponentDefinitionsClient(client_wrapper=self._client_wrapper)
71
- self.billing = AsyncBillingClient(client_wrapper=self._client_wrapper)
72
- self.deprecated = AsyncDeprecatedClient(client_wrapper=self._client_wrapper)
67
+
68
+
69
+ def _get_base_url(*, base_url: typing.Optional[str] = None, environment: LlamaCloudEnvironment) -> str:
70
+ if base_url is not None:
71
+ return base_url
72
+ elif environment is not None:
73
+ return environment.value
74
+ else:
75
+ raise Exception("Please pass in either base_url or environment to construct the client")
@@ -0,0 +1,7 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+
5
+
6
+ class LlamaCloudEnvironment(enum.Enum):
7
+ DEFAULT = "https://api.cloud.llamaindex.ai/"
@@ -1,18 +1,6 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from . import (
4
- api_keys,
5
- billing,
6
- component_definitions,
7
- data_sinks,
8
- data_sources,
9
- deprecated,
10
- evals,
11
- files,
12
- parsing,
13
- pipelines,
14
- projects,
15
- )
3
+ from . import component_definitions, data_sinks, data_sources, evals, files, parsing, pipelines, projects
16
4
  from .data_sinks import DataSinkUpdateComponent, DataSinkUpdateComponentOne
17
5
  from .data_sources import DataSourceUpdateComponent, DataSourceUpdateComponentOne, DataSourceUpdateCustomMetadataValue
18
6
  from .files import FileCreateResourceInfoValue
@@ -26,12 +14,9 @@ __all__ = [
26
14
  "DataSourceUpdateCustomMetadataValue",
27
15
  "FileCreateResourceInfoValue",
28
16
  "PipelineFileUpdateCustomMetadataValue",
29
- "api_keys",
30
- "billing",
31
17
  "component_definitions",
32
18
  "data_sinks",
33
19
  "data_sources",
34
- "deprecated",
35
20
  "evals",
36
21
  "files",
37
22
  "parsing",
@@ -32,7 +32,6 @@ class ComponentDefinitionsClient:
32
32
 
33
33
  client = LlamaCloud(
34
34
  token="YOUR_TOKEN",
35
- base_url="https://yourhost.com/path/to/api",
36
35
  )
37
36
  client.component_definitions.get_all_transformation_definitions()
38
37
  """
@@ -61,7 +60,6 @@ class ComponentDefinitionsClient:
61
60
 
62
61
  client = LlamaCloud(
63
62
  token="YOUR_TOKEN",
64
- base_url="https://yourhost.com/path/to/api",
65
63
  )
66
64
  client.component_definitions.get_all_data_source_definitions()
67
65
  """
@@ -88,7 +86,6 @@ class ComponentDefinitionsClient:
88
86
 
89
87
  client = LlamaCloud(
90
88
  token="YOUR_TOKEN",
91
- base_url="https://yourhost.com/path/to/api",
92
89
  )
93
90
  client.component_definitions.get_all_data_sink_definitions()
94
91
  """
@@ -120,7 +117,6 @@ class AsyncComponentDefinitionsClient:
120
117
 
121
118
  client = AsyncLlamaCloud(
122
119
  token="YOUR_TOKEN",
123
- base_url="https://yourhost.com/path/to/api",
124
120
  )
125
121
  await client.component_definitions.get_all_transformation_definitions()
126
122
  """
@@ -149,7 +145,6 @@ class AsyncComponentDefinitionsClient:
149
145
 
150
146
  client = AsyncLlamaCloud(
151
147
  token="YOUR_TOKEN",
152
- base_url="https://yourhost.com/path/to/api",
153
148
  )
154
149
  await client.component_definitions.get_all_data_source_definitions()
155
150
  """
@@ -176,7 +171,6 @@ class AsyncComponentDefinitionsClient:
176
171
 
177
172
  client = AsyncLlamaCloud(
178
173
  token="YOUR_TOKEN",
179
- base_url="https://yourhost.com/path/to/api",
180
174
  )
181
175
  await client.component_definitions.get_all_data_sink_definitions()
182
176
  """
@@ -43,7 +43,6 @@ class DataSinksClient:
43
43
 
44
44
  client = LlamaCloud(
45
45
  token="YOUR_TOKEN",
46
- base_url="https://yourhost.com/path/to/api",
47
46
  )
48
47
  client.data_sinks.list_data_sinks()
49
48
  """
@@ -78,7 +77,6 @@ class DataSinksClient:
78
77
 
79
78
  client = LlamaCloud(
80
79
  token="YOUR_TOKEN",
81
- base_url="https://yourhost.com/path/to/api",
82
80
  )
83
81
  client.data_sinks.create_data_sink(
84
82
  request=DataSinkCreate(
@@ -120,7 +118,6 @@ class DataSinksClient:
120
118
 
121
119
  client = LlamaCloud(
122
120
  token="YOUR_TOKEN",
123
- base_url="https://yourhost.com/path/to/api",
124
121
  )
125
122
  client.data_sinks.upsert_data_sink(
126
123
  request=DataSinkCreate(
@@ -158,7 +155,6 @@ class DataSinksClient:
158
155
 
159
156
  client = LlamaCloud(
160
157
  token="YOUR_TOKEN",
161
- base_url="https://yourhost.com/path/to/api",
162
158
  )
163
159
  client.data_sinks.get_data_sink(
164
160
  data_sink_id="string",
@@ -205,7 +201,6 @@ class DataSinksClient:
205
201
 
206
202
  client = LlamaCloud(
207
203
  token="YOUR_TOKEN",
208
- base_url="https://yourhost.com/path/to/api",
209
204
  )
210
205
  client.data_sinks.update_data_sink(
211
206
  data_sink_id="string",
@@ -245,7 +240,6 @@ class DataSinksClient:
245
240
 
246
241
  client = LlamaCloud(
247
242
  token="YOUR_TOKEN",
248
- base_url="https://yourhost.com/path/to/api",
249
243
  )
250
244
  client.data_sinks.delete_data_sink(
251
245
  data_sink_id="string",
@@ -284,7 +278,6 @@ class AsyncDataSinksClient:
284
278
 
285
279
  client = AsyncLlamaCloud(
286
280
  token="YOUR_TOKEN",
287
- base_url="https://yourhost.com/path/to/api",
288
281
  )
289
282
  await client.data_sinks.list_data_sinks()
290
283
  """
@@ -319,7 +312,6 @@ class AsyncDataSinksClient:
319
312
 
320
313
  client = AsyncLlamaCloud(
321
314
  token="YOUR_TOKEN",
322
- base_url="https://yourhost.com/path/to/api",
323
315
  )
324
316
  await client.data_sinks.create_data_sink(
325
317
  request=DataSinkCreate(
@@ -361,7 +353,6 @@ class AsyncDataSinksClient:
361
353
 
362
354
  client = AsyncLlamaCloud(
363
355
  token="YOUR_TOKEN",
364
- base_url="https://yourhost.com/path/to/api",
365
356
  )
366
357
  await client.data_sinks.upsert_data_sink(
367
358
  request=DataSinkCreate(
@@ -399,7 +390,6 @@ class AsyncDataSinksClient:
399
390
 
400
391
  client = AsyncLlamaCloud(
401
392
  token="YOUR_TOKEN",
402
- base_url="https://yourhost.com/path/to/api",
403
393
  )
404
394
  await client.data_sinks.get_data_sink(
405
395
  data_sink_id="string",
@@ -446,7 +436,6 @@ class AsyncDataSinksClient:
446
436
 
447
437
  client = AsyncLlamaCloud(
448
438
  token="YOUR_TOKEN",
449
- base_url="https://yourhost.com/path/to/api",
450
439
  )
451
440
  await client.data_sinks.update_data_sink(
452
441
  data_sink_id="string",
@@ -486,7 +475,6 @@ class AsyncDataSinksClient:
486
475
 
487
476
  client = AsyncLlamaCloud(
488
477
  token="YOUR_TOKEN",
489
- base_url="https://yourhost.com/path/to/api",
490
478
  )
491
479
  await client.data_sinks.delete_data_sink(
492
480
  data_sink_id="string",
@@ -44,7 +44,6 @@ class DataSourcesClient:
44
44
 
45
45
  client = LlamaCloud(
46
46
  token="YOUR_TOKEN",
47
- base_url="https://yourhost.com/path/to/api",
48
47
  )
49
48
  client.data_sources.list_data_sources()
50
49
  """
@@ -79,7 +78,6 @@ class DataSourcesClient:
79
78
 
80
79
  client = LlamaCloud(
81
80
  token="YOUR_TOKEN",
82
- base_url="https://yourhost.com/path/to/api",
83
81
  )
84
82
  client.data_sources.create_data_source(
85
83
  request=DataSourceCreate(
@@ -121,7 +119,6 @@ class DataSourcesClient:
121
119
 
122
120
  client = LlamaCloud(
123
121
  token="YOUR_TOKEN",
124
- base_url="https://yourhost.com/path/to/api",
125
122
  )
126
123
  client.data_sources.upsert_data_source(
127
124
  request=DataSourceCreate(
@@ -159,7 +156,6 @@ class DataSourcesClient:
159
156
 
160
157
  client = LlamaCloud(
161
158
  token="YOUR_TOKEN",
162
- base_url="https://yourhost.com/path/to/api",
163
159
  )
164
160
  client.data_sources.get_data_source(
165
161
  data_source_id="string",
@@ -209,7 +205,6 @@ class DataSourcesClient:
209
205
 
210
206
  client = LlamaCloud(
211
207
  token="YOUR_TOKEN",
212
- base_url="https://yourhost.com/path/to/api",
213
208
  )
214
209
  client.data_sources.update_data_source(
215
210
  data_source_id="string",
@@ -251,7 +246,6 @@ class DataSourcesClient:
251
246
 
252
247
  client = LlamaCloud(
253
248
  token="YOUR_TOKEN",
254
- base_url="https://yourhost.com/path/to/api",
255
249
  )
256
250
  client.data_sources.delete_data_source(
257
251
  data_source_id="string",
@@ -290,7 +284,6 @@ class AsyncDataSourcesClient:
290
284
 
291
285
  client = AsyncLlamaCloud(
292
286
  token="YOUR_TOKEN",
293
- base_url="https://yourhost.com/path/to/api",
294
287
  )
295
288
  await client.data_sources.list_data_sources()
296
289
  """
@@ -327,7 +320,6 @@ class AsyncDataSourcesClient:
327
320
 
328
321
  client = AsyncLlamaCloud(
329
322
  token="YOUR_TOKEN",
330
- base_url="https://yourhost.com/path/to/api",
331
323
  )
332
324
  await client.data_sources.create_data_source(
333
325
  request=DataSourceCreate(
@@ -371,7 +363,6 @@ class AsyncDataSourcesClient:
371
363
 
372
364
  client = AsyncLlamaCloud(
373
365
  token="YOUR_TOKEN",
374
- base_url="https://yourhost.com/path/to/api",
375
366
  )
376
367
  await client.data_sources.upsert_data_source(
377
368
  request=DataSourceCreate(
@@ -409,7 +400,6 @@ class AsyncDataSourcesClient:
409
400
 
410
401
  client = AsyncLlamaCloud(
411
402
  token="YOUR_TOKEN",
412
- base_url="https://yourhost.com/path/to/api",
413
403
  )
414
404
  await client.data_sources.get_data_source(
415
405
  data_source_id="string",
@@ -459,7 +449,6 @@ class AsyncDataSourcesClient:
459
449
 
460
450
  client = AsyncLlamaCloud(
461
451
  token="YOUR_TOKEN",
462
- base_url="https://yourhost.com/path/to/api",
463
452
  )
464
453
  await client.data_sources.update_data_source(
465
454
  data_source_id="string",
@@ -501,7 +490,6 @@ class AsyncDataSourcesClient:
501
490
 
502
491
  client = AsyncLlamaCloud(
503
492
  token="YOUR_TOKEN",
504
- base_url="https://yourhost.com/path/to/api",
505
493
  )
506
494
  await client.data_sources.delete_data_source(
507
495
  data_source_id="string",
@@ -41,7 +41,6 @@ class EvalsClient:
41
41
 
42
42
  client = LlamaCloud(
43
43
  token="YOUR_TOKEN",
44
- base_url="https://yourhost.com/path/to/api",
45
44
  )
46
45
  client.evals.get_dataset(
47
46
  dataset_id="string",
@@ -76,7 +75,6 @@ class EvalsClient:
76
75
 
77
76
  client = LlamaCloud(
78
77
  token="YOUR_TOKEN",
79
- base_url="https://yourhost.com/path/to/api",
80
78
  )
81
79
  client.evals.update_dataset(
82
80
  dataset_id="string",
@@ -111,7 +109,6 @@ class EvalsClient:
111
109
 
112
110
  client = LlamaCloud(
113
111
  token="YOUR_TOKEN",
114
- base_url="https://yourhost.com/path/to/api",
115
112
  )
116
113
  client.evals.delete_dataset(
117
114
  dataset_id="string",
@@ -144,7 +141,6 @@ class EvalsClient:
144
141
 
145
142
  client = LlamaCloud(
146
143
  token="YOUR_TOKEN",
147
- base_url="https://yourhost.com/path/to/api",
148
144
  )
149
145
  client.evals.get_questions(
150
146
  dataset_id="string",
@@ -182,7 +178,6 @@ class EvalsClient:
182
178
 
183
179
  client = LlamaCloud(
184
180
  token="YOUR_TOKEN",
185
- base_url="https://yourhost.com/path/to/api",
186
181
  )
187
182
  client.evals.create_question(
188
183
  dataset_id="string",
@@ -225,7 +220,6 @@ class EvalsClient:
225
220
 
226
221
  client = LlamaCloud(
227
222
  token="YOUR_TOKEN",
228
- base_url="https://yourhost.com/path/to/api",
229
223
  )
230
224
  client.evals.create_questions(
231
225
  dataset_id="string",
@@ -262,7 +256,6 @@ class EvalsClient:
262
256
 
263
257
  client = LlamaCloud(
264
258
  token="YOUR_TOKEN",
265
- base_url="https://yourhost.com/path/to/api",
266
259
  )
267
260
  client.evals.get_question(
268
261
  question_id="string",
@@ -298,7 +291,6 @@ class EvalsClient:
298
291
 
299
292
  client = LlamaCloud(
300
293
  token="YOUR_TOKEN",
301
- base_url="https://yourhost.com/path/to/api",
302
294
  )
303
295
  client.evals.replace_question(
304
296
  question_id="string",
@@ -335,7 +327,6 @@ class EvalsClient:
335
327
 
336
328
  client = LlamaCloud(
337
329
  token="YOUR_TOKEN",
338
- base_url="https://yourhost.com/path/to/api",
339
330
  )
340
331
  client.evals.delete_question(
341
332
  question_id="string",
@@ -366,7 +357,6 @@ class EvalsClient:
366
357
 
367
358
  client = LlamaCloud(
368
359
  token="YOUR_TOKEN",
369
- base_url="https://yourhost.com/path/to/api",
370
360
  )
371
361
  client.evals.get_supported_models()
372
362
  """
@@ -402,7 +392,6 @@ class AsyncEvalsClient:
402
392
 
403
393
  client = AsyncLlamaCloud(
404
394
  token="YOUR_TOKEN",
405
- base_url="https://yourhost.com/path/to/api",
406
395
  )
407
396
  await client.evals.get_dataset(
408
397
  dataset_id="string",
@@ -437,7 +426,6 @@ class AsyncEvalsClient:
437
426
 
438
427
  client = AsyncLlamaCloud(
439
428
  token="YOUR_TOKEN",
440
- base_url="https://yourhost.com/path/to/api",
441
429
  )
442
430
  await client.evals.update_dataset(
443
431
  dataset_id="string",
@@ -472,7 +460,6 @@ class AsyncEvalsClient:
472
460
 
473
461
  client = AsyncLlamaCloud(
474
462
  token="YOUR_TOKEN",
475
- base_url="https://yourhost.com/path/to/api",
476
463
  )
477
464
  await client.evals.delete_dataset(
478
465
  dataset_id="string",
@@ -505,7 +492,6 @@ class AsyncEvalsClient:
505
492
 
506
493
  client = AsyncLlamaCloud(
507
494
  token="YOUR_TOKEN",
508
- base_url="https://yourhost.com/path/to/api",
509
495
  )
510
496
  await client.evals.get_questions(
511
497
  dataset_id="string",
@@ -543,7 +529,6 @@ class AsyncEvalsClient:
543
529
 
544
530
  client = AsyncLlamaCloud(
545
531
  token="YOUR_TOKEN",
546
- base_url="https://yourhost.com/path/to/api",
547
532
  )
548
533
  await client.evals.create_question(
549
534
  dataset_id="string",
@@ -586,7 +571,6 @@ class AsyncEvalsClient:
586
571
 
587
572
  client = AsyncLlamaCloud(
588
573
  token="YOUR_TOKEN",
589
- base_url="https://yourhost.com/path/to/api",
590
574
  )
591
575
  await client.evals.create_questions(
592
576
  dataset_id="string",
@@ -623,7 +607,6 @@ class AsyncEvalsClient:
623
607
 
624
608
  client = AsyncLlamaCloud(
625
609
  token="YOUR_TOKEN",
626
- base_url="https://yourhost.com/path/to/api",
627
610
  )
628
611
  await client.evals.get_question(
629
612
  question_id="string",
@@ -659,7 +642,6 @@ class AsyncEvalsClient:
659
642
 
660
643
  client = AsyncLlamaCloud(
661
644
  token="YOUR_TOKEN",
662
- base_url="https://yourhost.com/path/to/api",
663
645
  )
664
646
  await client.evals.replace_question(
665
647
  question_id="string",
@@ -696,7 +678,6 @@ class AsyncEvalsClient:
696
678
 
697
679
  client = AsyncLlamaCloud(
698
680
  token="YOUR_TOKEN",
699
- base_url="https://yourhost.com/path/to/api",
700
681
  )
701
682
  await client.evals.delete_question(
702
683
  question_id="string",
@@ -727,7 +708,6 @@ class AsyncEvalsClient:
727
708
 
728
709
  client = AsyncLlamaCloud(
729
710
  token="YOUR_TOKEN",
730
- base_url="https://yourhost.com/path/to/api",
731
711
  )
732
712
  await client.evals.get_supported_models()
733
713
  """
@@ -44,7 +44,6 @@ class FilesClient:
44
44
 
45
45
  client = LlamaCloud(
46
46
  token="YOUR_TOKEN",
47
- base_url="https://yourhost.com/path/to/api",
48
47
  )
49
48
  client.files.read_file(
50
49
  id="string",
@@ -80,7 +79,6 @@ class FilesClient:
80
79
 
81
80
  client = LlamaCloud(
82
81
  token="YOUR_TOKEN",
83
- base_url="https://yourhost.com/path/to/api",
84
82
  )
85
83
  client.files.delete_file(
86
84
  id="string",
@@ -114,7 +112,6 @@ class FilesClient:
114
112
 
115
113
  client = LlamaCloud(
116
114
  token="YOUR_TOKEN",
117
- base_url="https://yourhost.com/path/to/api",
118
115
  )
119
116
  client.files.read_files()
120
117
  """
@@ -193,7 +190,6 @@ class FilesClient:
193
190
 
194
191
  client = LlamaCloud(
195
192
  token="YOUR_TOKEN",
196
- base_url="https://yourhost.com/path/to/api",
197
193
  )
198
194
  client.files.generate_presigned_url(
199
195
  name="string",
@@ -237,7 +233,6 @@ class FilesClient:
237
233
 
238
234
  client = LlamaCloud(
239
235
  token="YOUR_TOKEN",
240
- base_url="https://yourhost.com/path/to/api",
241
236
  )
242
237
  client.files.sync_files()
243
238
  """
@@ -271,7 +266,6 @@ class FilesClient:
271
266
 
272
267
  client = LlamaCloud(
273
268
  token="YOUR_TOKEN",
274
- base_url="https://yourhost.com/path/to/api",
275
269
  )
276
270
  client.files.read_file_content(
277
271
  id="string",
@@ -312,7 +306,6 @@ class AsyncFilesClient:
312
306
 
313
307
  client = AsyncLlamaCloud(
314
308
  token="YOUR_TOKEN",
315
- base_url="https://yourhost.com/path/to/api",
316
309
  )
317
310
  await client.files.read_file(
318
311
  id="string",
@@ -348,7 +341,6 @@ class AsyncFilesClient:
348
341
 
349
342
  client = AsyncLlamaCloud(
350
343
  token="YOUR_TOKEN",
351
- base_url="https://yourhost.com/path/to/api",
352
344
  )
353
345
  await client.files.delete_file(
354
346
  id="string",
@@ -382,7 +374,6 @@ class AsyncFilesClient:
382
374
 
383
375
  client = AsyncLlamaCloud(
384
376
  token="YOUR_TOKEN",
385
- base_url="https://yourhost.com/path/to/api",
386
377
  )
387
378
  await client.files.read_files()
388
379
  """
@@ -461,7 +452,6 @@ class AsyncFilesClient:
461
452
 
462
453
  client = AsyncLlamaCloud(
463
454
  token="YOUR_TOKEN",
464
- base_url="https://yourhost.com/path/to/api",
465
455
  )
466
456
  await client.files.generate_presigned_url(
467
457
  name="string",
@@ -505,7 +495,6 @@ class AsyncFilesClient:
505
495
 
506
496
  client = AsyncLlamaCloud(
507
497
  token="YOUR_TOKEN",
508
- base_url="https://yourhost.com/path/to/api",
509
498
  )
510
499
  await client.files.sync_files()
511
500
  """
@@ -539,7 +528,6 @@ class AsyncFilesClient:
539
528
 
540
529
  client = AsyncLlamaCloud(
541
530
  token="YOUR_TOKEN",
542
- base_url="https://yourhost.com/path/to/api",
543
531
  )
544
532
  await client.files.read_file_content(
545
533
  id="string",