lfx-nightly 0.2.0.dev26__py3-none-any.whl → 0.2.1.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. lfx/_assets/component_index.json +1 -1
  2. lfx/base/agents/agent.py +9 -4
  3. lfx/base/agents/altk_base_agent.py +16 -3
  4. lfx/base/agents/altk_tool_wrappers.py +1 -1
  5. lfx/base/agents/utils.py +4 -0
  6. lfx/base/composio/composio_base.py +78 -41
  7. lfx/base/data/base_file.py +14 -4
  8. lfx/base/data/cloud_storage_utils.py +156 -0
  9. lfx/base/data/docling_utils.py +191 -65
  10. lfx/base/data/storage_utils.py +109 -0
  11. lfx/base/datastax/astradb_base.py +75 -64
  12. lfx/base/mcp/util.py +2 -2
  13. lfx/base/models/__init__.py +11 -1
  14. lfx/base/models/anthropic_constants.py +21 -12
  15. lfx/base/models/google_generative_ai_constants.py +33 -9
  16. lfx/base/models/model_metadata.py +6 -0
  17. lfx/base/models/ollama_constants.py +196 -30
  18. lfx/base/models/openai_constants.py +37 -10
  19. lfx/base/models/unified_models.py +1123 -0
  20. lfx/base/models/watsonx_constants.py +36 -0
  21. lfx/base/tools/component_tool.py +2 -9
  22. lfx/cli/commands.py +6 -1
  23. lfx/cli/run.py +65 -409
  24. lfx/cli/script_loader.py +13 -3
  25. lfx/components/__init__.py +0 -3
  26. lfx/components/composio/github_composio.py +1 -1
  27. lfx/components/cuga/cuga_agent.py +39 -27
  28. lfx/components/data_source/api_request.py +4 -2
  29. lfx/components/docling/__init__.py +45 -11
  30. lfx/components/docling/chunk_docling_document.py +3 -1
  31. lfx/components/docling/docling_inline.py +39 -49
  32. lfx/components/docling/export_docling_document.py +3 -1
  33. lfx/components/elastic/opensearch_multimodal.py +215 -57
  34. lfx/components/files_and_knowledge/file.py +439 -39
  35. lfx/components/files_and_knowledge/ingestion.py +8 -0
  36. lfx/components/files_and_knowledge/retrieval.py +10 -0
  37. lfx/components/files_and_knowledge/save_file.py +123 -53
  38. lfx/components/ibm/watsonx.py +7 -1
  39. lfx/components/input_output/chat_output.py +7 -1
  40. lfx/components/langchain_utilities/tool_calling.py +14 -6
  41. lfx/components/llm_operations/batch_run.py +80 -25
  42. lfx/components/llm_operations/lambda_filter.py +33 -6
  43. lfx/components/llm_operations/llm_conditional_router.py +39 -7
  44. lfx/components/llm_operations/structured_output.py +38 -12
  45. lfx/components/models/__init__.py +16 -74
  46. lfx/components/models_and_agents/agent.py +51 -201
  47. lfx/components/models_and_agents/embedding_model.py +185 -339
  48. lfx/components/models_and_agents/language_model.py +54 -318
  49. lfx/components/models_and_agents/mcp_component.py +58 -9
  50. lfx/components/ollama/ollama.py +9 -4
  51. lfx/components/ollama/ollama_embeddings.py +2 -1
  52. lfx/components/openai/openai_chat_model.py +1 -1
  53. lfx/components/processing/__init__.py +0 -3
  54. lfx/components/vllm/__init__.py +37 -0
  55. lfx/components/vllm/vllm.py +141 -0
  56. lfx/components/vllm/vllm_embeddings.py +110 -0
  57. lfx/custom/custom_component/custom_component.py +8 -6
  58. lfx/custom/directory_reader/directory_reader.py +5 -2
  59. lfx/graph/utils.py +64 -18
  60. lfx/inputs/__init__.py +2 -0
  61. lfx/inputs/input_mixin.py +54 -0
  62. lfx/inputs/inputs.py +115 -0
  63. lfx/interface/initialize/loading.py +42 -12
  64. lfx/io/__init__.py +2 -0
  65. lfx/run/__init__.py +5 -0
  66. lfx/run/base.py +494 -0
  67. lfx/schema/data.py +1 -1
  68. lfx/schema/image.py +28 -19
  69. lfx/schema/message.py +19 -3
  70. lfx/services/interfaces.py +5 -0
  71. lfx/services/manager.py +5 -4
  72. lfx/services/mcp_composer/service.py +45 -13
  73. lfx/services/settings/auth.py +18 -11
  74. lfx/services/settings/base.py +12 -24
  75. lfx/services/settings/constants.py +2 -0
  76. lfx/services/storage/local.py +37 -0
  77. lfx/services/storage/service.py +19 -0
  78. lfx/utils/constants.py +1 -0
  79. lfx/utils/image.py +29 -11
  80. lfx/utils/validate_cloud.py +14 -3
  81. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/METADATA +5 -2
  82. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/RECORD +84 -78
  83. lfx/components/processing/dataframe_to_toolset.py +0 -259
  84. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/WHEEL +0 -0
  85. {lfx_nightly-0.2.0.dev26.dist-info → lfx_nightly-0.2.1.dev7.dist-info}/entry_points.txt +0 -0
lfx/base/agents/agent.py CHANGED
@@ -71,8 +71,8 @@ class LCAgentComponent(Component):
     ]
 
     outputs = [
-        Output(display_name="Agent", name="agent", method="build_agent", hidden=True, tool_mode=False),
         Output(display_name="Response", name="response", method="message_response"),
+        Output(display_name="Agent", name="agent", method="build_agent", tool_mode=False),
     ]
 
     # Get shared callbacks for tracing and save them to self.shared_callbacks
@@ -181,7 +181,11 @@ class LCAgentComponent(Component):
         else:
             input_dict = {"input": self.input_value}
 
-        if hasattr(self, "system_prompt"):
+        # Ensure input_dict is initialized
+        if "input" not in input_dict:
+            input_dict = {"input": self.input_value}
+
+        if hasattr(self, "system_prompt") and self.system_prompt and self.system_prompt.strip():
             input_dict["system_prompt"] = self.system_prompt
 
         if hasattr(self, "chat_history") and self.chat_history:
@@ -196,8 +200,9 @@
         # Note: Agent input must be a string, so we extract text and move images to chat_history
         if lc_message is not None and hasattr(lc_message, "content") and isinstance(lc_message.content, list):
             # Extract images and text from the text content items
-            image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
-            text_content = [item for item in lc_message.content if item.get("type") != "image"]
+            # Support both "image" (legacy) and "image_url" (standard) types
+            image_dicts = [item for item in lc_message.content if item.get("type") in ("image", "image_url")]
+            text_content = [item for item in lc_message.content if item.get("type") not in ("image", "image_url")]
 
             text_strings = [
                 item.get("text", "")
lfx/base/agents/altk_base_agent.py CHANGED
@@ -319,9 +319,9 @@ class ALTKBaseAgentComponent(AgentComponent):
             input_dict["chat_history"] = data_to_messages([m.to_data() for m in self.chat_history])
         if hasattr(lc_message, "content") and isinstance(lc_message.content, list):
             # ! Because the input has to be a string, we must pass the images in the chat_history
-
-            image_dicts = [item for item in lc_message.content if item.get("type") == "image"]
-            lc_message.content = [item for item in lc_message.content if item.get("type") != "image"]
+            # Support both "image" (legacy) and "image_url" (standard) types
+            image_dicts = [item for item in lc_message.content if item.get("type") in ("image", "image_url")]
+            lc_message.content = [item for item in lc_message.content if item.get("type") not in ("image", "image_url")]
 
             if "chat_history" not in input_dict:
                 input_dict["chat_history"] = []
@@ -330,6 +330,19 @@ class ALTKBaseAgentComponent(AgentComponent):
             else:
                 input_dict["chat_history"] = [HumanMessage(content=[image_dict]) for image_dict in image_dicts]
         input_dict["input"] = input_text
+
+        # Copied from agent.py
+        # Final safety check: ensure input is never empty (prevents Anthropic API errors)
+        current_input = input_dict.get("input", "")
+        if isinstance(current_input, list):
+            current_input = " ".join(map(str, current_input))
+        elif not isinstance(current_input, str):
+            current_input = str(current_input)
+        if not current_input.strip():
+            input_dict["input"] = "Continue the conversation."
+        else:
+            input_dict["input"] = current_input
+
         if hasattr(self, "graph"):
            session_id = self.graph.session_id
         elif hasattr(self, "_session_id"):
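The added block normalizes the final "input" value so that an empty string never reaches the model provider. The same logic, pulled into a standalone helper purely for illustration (the helper name is not part of the package):

    def normalize_agent_input(value) -> str:
        # Mirrors the safety check above: coerce to a string and never return empty input.
        if isinstance(value, list):
            value = " ".join(map(str, value))
        elif not isinstance(value, str):
            value = str(value)
        return value if value.strip() else "Continue the conversation."

    assert normalize_agent_input(["use", "the", "search", "tool"]) == "use the search tool"
    assert normalize_agent_input("   ") == "Continue the conversation."
    assert normalize_agent_input(42) == "42"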
lfx/base/agents/altk_tool_wrappers.py CHANGED
@@ -513,7 +513,7 @@ class PostToolProcessor(ALTKBaseTool):
         output = None
         try:
             output = middleware.process(input_data, AgentPhase.RUNTIME)
-        except (AttributeError, TypeError, ValueError, RuntimeError) as e:
+        except Exception as e:  # noqa: BLE001
             logger.error(f"Exception in executing CodeGenerationComponent: {e}")
         if output is not None and hasattr(output, "result"):
             logger.info(f"Output of CodeGenerationComponent: {output.result}")
lfx/base/agents/utils.py CHANGED
@@ -224,6 +224,10 @@ def get_chat_output_sender_name(self) -> str | None:
     if not hasattr(self, "graph") or not self.graph:
         return None
 
+    # Check if graph has vertices attribute (PlaceholderGraph doesn't)
+    if not hasattr(self.graph, "vertices"):
+        return None
+
    for vertex in self.graph.vertices:
        # Safely check if vertex has data attribute, correct type, and raw_params
        if (
lfx/base/composio/composio_base.py CHANGED
@@ -41,6 +41,58 @@ class ComposioBaseComponent(Component):
 
     default_tools_limit: int = 5
 
+    # Reserved attribute names that conflict with Component base class
+    RESERVED_ATTRIBUTES: set[str] = {
+        # Core component attributes
+        "name",
+        "description",
+        "status",
+        "display_name",
+        "icon",
+        "priority",
+        "code",
+        "inputs",
+        "outputs",
+        "selected_output",
+        # Properties and methods
+        "trace_type",
+        "trace_name",
+        "function",
+        "repr_value",
+        "field_config",
+        "field_order",
+        "frozen",
+        "build_parameters",
+        "cache",
+        "tools_metadata",
+        "vertex",
+        # User and session attributes
+        "user_id",  # Already handled separately but included for completeness
+        "session_id",
+        "flow_id",
+        "flow_name",
+        "context",
+        # Common method names
+        "build",
+        "run",
+        "stop",
+        "start",
+        "validate",
+        "get_function",
+        "set_attributes",
+        # Additional common conflicts
+        "id",
+        "type",
+        "value",
+        "metadata",
+        "logs",
+        "results",
+        "artifacts",
+        "parameters",
+        "template",
+        "config",
+    }
+
     _base_inputs = [
         MessageTextInput(
             name="entity_id",
@@ -623,13 +675,9 @@ class ComposioBaseComponent(Component):
                     attachment_related_found = True
                     continue  # Skip individual attachment fields
 
-                # Handle conflicting field names - rename user_id to avoid conflicts with entity_id
-                if clean_field == "user_id":
-                    clean_field = f"{self.app_name}_user_id"
-
-                # Handle reserved attribute name conflicts (e.g., 'status', 'name')
+                # Handle reserved attribute name conflicts
                 # Prefix with app name to prevent clashes with component attributes
-                if clean_field in {"status", "name"}:
+                if clean_field in self.RESERVED_ATTRIBUTES:
                     clean_field = f"{self.app_name}_{clean_field}"
 
                 action_fields.append(clean_field)
@@ -795,28 +843,16 @@ class ComposioBaseComponent(Component):
                     # Don't add individual attachment sub-fields to the schema
                     continue
 
-                # Handle conflicting field names - rename user_id to avoid conflicts with entity_id
-                if clean_field_name == "user_id":
-                    clean_field_name = f"{self.app_name}_user_id"
+                # Handle reserved attribute name conflicts
+                if clean_field_name in self.RESERVED_ATTRIBUTES:
+                    original_name = clean_field_name
+                    clean_field_name = f"{self.app_name}_{clean_field_name}"
                     # Update the field schema description to reflect the name change
                     field_schema_copy = field_schema.copy()
+                    original_description = field_schema.get("description", "")
                     field_schema_copy["description"] = (
-                        f"User ID for {self.app_name.title()}: " + field_schema["description"]
-                    )
-                elif clean_field_name == "status":
-                    clean_field_name = f"{self.app_name}_status"
-                    # Update the field schema description to reflect the name change
-                    field_schema_copy = field_schema.copy()
-                    field_schema_copy["description"] = f"Status for {self.app_name.title()}: " + field_schema.get(
-                        "description", ""
-                    )
-                elif clean_field_name == "name":
-                    clean_field_name = f"{self.app_name}_name"
-                    # Update the field schema description to reflect the name change
-                    field_schema_copy = field_schema.copy()
-                    field_schema_copy["description"] = f"Name for {self.app_name.title()}: " + field_schema.get(
-                        "description", ""
-                    )
+                        f"{original_name.replace('_', ' ').title()} for {self.app_name.title()}: {original_description}"
+                    ).strip()
                 else:
                     # Use the original field schema for all other fields
                     field_schema_copy = field_schema
@@ -842,12 +878,8 @@ class ComposioBaseComponent(Component):
             cleaned_required = []
             for field in flat_schema["required"]:
                 base = field.replace("[0]", "")
-                if base == "user_id":
-                    cleaned_required.append(f"{self.app_name}_user_id")
-                elif base == "status":
-                    cleaned_required.append(f"{self.app_name}_status")
-                elif base == "name":
-                    cleaned_required.append(f"{self.app_name}_name")
+                if base in self.RESERVED_ATTRIBUTES:
+                    cleaned_required.append(f"{self.app_name}_{base}")
                 else:
                     cleaned_required.append(base)
             flat_schema["required"] = cleaned_required
@@ -943,9 +975,10 @@ class ComposioBaseComponent(Component):
                 inp.advanced = True
 
             # Skip entity_id being mapped to user_id parameter
-            if inp.name == "user_id" and getattr(self, "entity_id", None) == getattr(
-                inp, "value", None
-            ):
+            # Check both original name and renamed version
+            if inp.name in {"user_id", f"{self.app_name}_user_id"} and getattr(
+                self, "entity_id", None
+            ) == getattr(inp, "value", None):
                 continue
 
             processed_inputs.append(inp)
@@ -2422,12 +2455,11 @@ class ComposioBaseComponent(Component):
 
             # Handle renamed fields - map back to original names for API execution
             final_field_name = field
-            if field.endswith("_user_id") and field.startswith(self.app_name):
-                final_field_name = "user_id"
-            elif field == f"{self.app_name}_status":
-                final_field_name = "status"
-            elif field == f"{self.app_name}_name":
-                final_field_name = "name"
+            # Check if this is a renamed reserved attribute
+            if field.startswith(f"{self.app_name}_"):
+                potential_original = field[len(self.app_name) + 1 :]  # Remove app_name prefix
+                if potential_original in self.RESERVED_ATTRIBUTES:
+                    final_field_name = potential_original
 
             arguments[final_field_name] = value
 
@@ -2538,7 +2570,7 @@ class ComposioBaseComponent(Component):
                 build_config[fname]["value"] = "" if fname not in self._bool_variables else False
         # Hide any other visible, non-protected fields that look like parameters
         protected = {
-            "code",
+            # Component control fields
            "entity_id",
            "api_key",
            "auth_link",
@@ -2570,6 +2602,11 @@ class ComposioBaseComponent(Component):
            "instance_url",
            "tenant_id",
        }
+        # Add all reserved Component attributes to protected set
+        protected.update(self.RESERVED_ATTRIBUTES)
+        # Also add the renamed versions (with app_name prefix) to protected set
+        for attr in self.RESERVED_ATTRIBUTES:
+            protected.add(f"{self.app_name}_{attr}")
         # Add all dynamic auth fields to protected set
         protected.update(self._auth_dynamic_fields)
         # Also protect any auth fields discovered across all instances
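Taken together, the composio_base.py hunks implement a rename round trip: field names that collide with Component attributes are prefixed with the app name when they become component inputs, and the prefix is stripped again before the Composio API call. A minimal sketch of that round trip, separate from the diff (the app name and the abbreviated reserved set are illustrative):

    RESERVED_ATTRIBUTES = {"name", "status", "user_id"}  # abbreviated; the real set is listed above
    app_name = "github"

    def rename_field(clean_field: str) -> str:
        # Inbound: reserved names get an app-name prefix so they do not shadow Component attributes.
        return f"{app_name}_{clean_field}" if clean_field in RESERVED_ATTRIBUTES else clean_field

    def restore_field(field: str) -> str:
        # Outbound: strip the prefix again so the Composio API sees the original parameter name.
        if field.startswith(f"{app_name}_"):
            potential_original = field[len(app_name) + 1 :]
            if potential_original in RESERVED_ATTRIBUTES:
                return potential_original
        return field

    assert rename_field("status") == "github_status"
    assert restore_field("github_status") == "status"
    assert rename_field("title") == "title"  # non-reserved fields pass through unchanged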
lfx/base/data/base_file.py CHANGED
@@ -260,8 +260,6 @@ class BaseFileComponent(Component, ABC):
         filename = file_path_obj.name
 
         settings = get_settings_service().settings
-
-        # Get file size - use storage service for S3, filesystem for local
         if settings.storage_type == "s3":
             try:
                 file_size = get_file_size(file_path)
@@ -618,9 +616,21 @@ class BaseFileComponent(Component, ABC):
                    BaseFileComponent.BaseFile(data, Path(path_str), delete_after_processing=delete_after_processing)
                )
            else:
-                resolved_path = Path(self.resolve_path(path_str))
+                # Check if path looks like a storage path (flow_id/filename format)
+                # If so, use get_full_path to resolve it to the actual storage location
+                if "/" in path_str and not Path(path_str).is_absolute():
+                    try:
+                        resolved_path = Path(self.get_full_path(path_str))
+                        self.log(f"Resolved storage path '{path_str}' to '{resolved_path}'")
+                    except (ValueError, AttributeError) as e:
+                        # Fallback to resolve_path if get_full_path fails
+                        self.log(f"get_full_path failed for '{path_str}': {e}, falling back to resolve_path")
+                        resolved_path = Path(self.resolve_path(path_str))
+                else:
+                    resolved_path = Path(self.resolve_path(path_str))
+
                if not resolved_path.exists():
-                    msg = f"File or directory not found: {path}"
+                    msg = f"File not found: '{path}' (resolved to: '{resolved_path}'). Please upload the file again."
                    self.log(msg)
                    if not self.silent_errors:
                        raise ValueError(msg)
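The new branch only changes how candidate paths are classified: relative paths of the form "flow_id/filename" are treated as storage keys and resolved through get_full_path, everything else keeps going through resolve_path. A small sketch of just that classification, outside the diff (the example paths are made up):

    from pathlib import Path

    def looks_like_storage_path(path_str: str) -> bool:
        # Same test as the diff: contains a separator but is not an absolute filesystem path.
        # Absolute paths and bare filenames fall through to the regular resolver.
        return "/" in path_str and not Path(path_str).is_absolute()

    assert looks_like_storage_path("1f2d3c4b/report.pdf") is True   # resolved via get_full_path
    assert looks_like_storage_path("report.pdf") is False           # resolved via resolve_path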
lfx/base/data/cloud_storage_utils.py ADDED
@@ -0,0 +1,156 @@
+"""Shared utilities for cloud storage operations (AWS S3 and Google Drive).
+
+This module provides common functionality used by both read and write file components
+to avoid code duplication.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+
+def validate_aws_credentials(component: Any) -> None:
+    """Validate that required AWS S3 credentials are present.
+
+    Args:
+        component: Component instance with AWS credential attributes
+
+    Raises:
+        ValueError: If any required credential is missing
+    """
+    if not getattr(component, "aws_access_key_id", None):
+        msg = "AWS Access Key ID is required for S3 storage"
+        raise ValueError(msg)
+    if not getattr(component, "aws_secret_access_key", None):
+        msg = "AWS Secret Key is required for S3 storage"
+        raise ValueError(msg)
+    if not getattr(component, "bucket_name", None):
+        msg = "S3 Bucket Name is required for S3 storage"
+        raise ValueError(msg)
+
+
+def create_s3_client(component: Any):
+    """Create and return a configured boto3 S3 client.
+
+    Args:
+        component: Component instance with AWS credential attributes
+
+    Returns:
+        boto3 S3 client instance
+
+    Raises:
+        ImportError: If boto3 is not installed
+    """
+    try:
+        import boto3
+    except ImportError as e:
+        msg = "boto3 is not installed. Please install it using `uv pip install boto3`."
+        raise ImportError(msg) from e
+
+    client_config = {
+        "aws_access_key_id": component.aws_access_key_id,
+        "aws_secret_access_key": component.aws_secret_access_key,
+    }
+
+    if hasattr(component, "aws_region") and component.aws_region:
+        client_config["region_name"] = component.aws_region
+
+    return boto3.client("s3", **client_config)
+
+
+def parse_google_service_account_key(service_account_key: str) -> dict:
+    """Parse Google service account JSON key with multiple fallback strategies.
+
+    This function handles various common formatting issues when users paste
+    service account keys, including:
+    - Control characters
+    - Extra whitespace
+    - Double-encoded JSON strings
+    - Escaped newlines in private_key field
+
+    Args:
+        service_account_key: Service account JSON key as string
+
+    Returns:
+        dict: Parsed service account credentials
+
+    Raises:
+        ValueError: If all parsing strategies fail
+    """
+    credentials_dict = None
+    parse_errors = []
+
+    # Strategy 1: Parse as-is with strict=False to allow control characters
+    try:
+        credentials_dict = json.loads(service_account_key, strict=False)
+    except json.JSONDecodeError as e:
+        parse_errors.append(f"Standard parse: {e!s}")
+
+    # Strategy 2: Strip whitespace and try again
+    if credentials_dict is None:
+        try:
+            cleaned_key = service_account_key.strip()
+            credentials_dict = json.loads(cleaned_key, strict=False)
+        except json.JSONDecodeError as e:
+            parse_errors.append(f"Stripped parse: {e!s}")
+
+    # Strategy 3: Check if it's double-encoded (JSON string of a JSON string)
+    if credentials_dict is None:
+        try:
+            decoded_once = json.loads(service_account_key, strict=False)
+            credentials_dict = json.loads(decoded_once, strict=False) if isinstance(decoded_once, str) else decoded_once
+        except json.JSONDecodeError as e:
+            parse_errors.append(f"Double-encoded parse: {e!s}")
+
+    # Strategy 4: Try to fix common issues with newlines in the private_key field
+    if credentials_dict is None:
+        try:
+            # Replace literal \n with actual newlines which is common in pasted JSON
+            fixed_key = service_account_key.replace("\\n", "\n")
+            credentials_dict = json.loads(fixed_key, strict=False)
+        except json.JSONDecodeError as e:
+            parse_errors.append(f"Newline-fixed parse: {e!s}")
+
+    if credentials_dict is None:
+        error_details = "; ".join(parse_errors)
+        msg = (
+            f"Unable to parse service account key JSON. Tried multiple strategies: {error_details}. "
+            "Please ensure you've copied the entire JSON content from your service account key file. "
+            "The JSON should start with '{' and contain fields like 'type', 'project_id', 'private_key', etc."
+        )
+        raise ValueError(msg)
+
+    return credentials_dict
+
+
+def create_google_drive_service(service_account_key: str, scopes: list[str], *, return_credentials: bool = False):
+    """Create and return a configured Google Drive API service.
+
+    Args:
+        service_account_key: Service account JSON key as string
+        scopes: List of Google API scopes to request
+        return_credentials: If True, return both service and credentials as tuple
+
+    Returns:
+        Google Drive API service instance, or tuple of (service, credentials) if return_credentials=True
+
+    Raises:
+        ImportError: If Google API client libraries are not installed
+        ValueError: If credentials cannot be parsed
+    """
+    try:
+        from google.oauth2 import service_account
+        from googleapiclient.discovery import build
+    except ImportError as e:
+        msg = "Google API client libraries are not installed. Please install them."
+        raise ImportError(msg) from e
+
+    credentials_dict = parse_google_service_account_key(service_account_key)
+
+    credentials = service_account.Credentials.from_service_account_info(credentials_dict, scopes=scopes)
+    service = build("drive", "v3", credentials=credentials)
+
+    if return_credentials:
+        return service, credentials
+    return service
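The multi-strategy parser above exists mainly to recover keys mangled while being pasted. For example, a double-encoded key (Strategy 3) can be recovered with two json.loads passes; a self-contained illustration using plain json and a dummy key, not code from the package:

    import json

    # A service-account key that was pasted as a JSON-encoded string (double-encoded).
    raw = json.dumps(json.dumps({"type": "service_account", "project_id": "demo-project"}))

    decoded_once = json.loads(raw, strict=False)  # still a str, not a dict
    credentials = json.loads(decoded_once, strict=False) if isinstance(decoded_once, str) else decoded_once
    print(credentials["project_id"])  # demo-project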