langflow-base-nightly 0.5.0.dev39__py3-none-any.whl → 0.5.1.dev0__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (48)
  1. langflow/api/router.py +2 -0
  2. langflow/api/v1/__init__.py +2 -0
  3. langflow/api/v1/endpoints.py +7 -1
  4. langflow/api/v1/openai_responses.py +545 -0
  5. langflow/components/data/file.py +302 -376
  6. langflow/components/docling/docling_inline.py +56 -4
  7. langflow/components/nvidia/nvidia_ingest.py +3 -2
  8. langflow/components/youtube/channel.py +1 -1
  9. langflow/custom/custom_component/custom_component.py +11 -0
  10. langflow/graph/graph/base.py +3 -1
  11. langflow/initial_setup/starter_projects/Basic Prompt Chaining.json +1 -1
  12. langflow/initial_setup/starter_projects/Basic Prompting.json +1 -1
  13. langflow/initial_setup/starter_projects/Blog Writer.json +2 -2
  14. langflow/initial_setup/starter_projects/Custom Component Generator.json +1 -1
  15. langflow/initial_setup/starter_projects/Document Q&A.json +2 -2
  16. langflow/initial_setup/starter_projects/Financial Report Parser.json +1 -1
  17. langflow/initial_setup/starter_projects/Hybrid Search RAG.json +2 -2
  18. langflow/initial_setup/starter_projects/Image Sentiment Analysis.json +1 -1
  19. langflow/initial_setup/starter_projects/Instagram Copywriter.json +2 -2
  20. langflow/initial_setup/starter_projects/Invoice Summarizer.json +1 -1
  21. langflow/initial_setup/starter_projects/Knowledge Ingestion.json +2 -2
  22. langflow/initial_setup/starter_projects/Knowledge Retrieval.json +1 -1
  23. langflow/initial_setup/starter_projects/Market Research.json +2 -2
  24. langflow/initial_setup/starter_projects/Meeting Summary.json +3 -3
  25. langflow/initial_setup/starter_projects/Memory Chatbot.json +1 -1
  26. langflow/initial_setup/starter_projects/News Aggregator.json +3 -3
  27. langflow/initial_setup/starter_projects/Nvidia Remix.json +2 -2
  28. langflow/initial_setup/starter_projects/Pokédex Agent.json +2 -2
  29. langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json +2 -2
  30. langflow/initial_setup/starter_projects/Price Deal Finder.json +3 -3
  31. langflow/initial_setup/starter_projects/Research Agent.json +2 -2
  32. langflow/initial_setup/starter_projects/Research Translation Loop.json +1 -1
  33. langflow/initial_setup/starter_projects/SEO Keyword Generator.json +1 -1
  34. langflow/initial_setup/starter_projects/SaaS Pricing.json +1 -1
  35. langflow/initial_setup/starter_projects/Search agent.json +2 -2
  36. langflow/initial_setup/starter_projects/Sequential Tasks Agents.json +3 -3
  37. langflow/initial_setup/starter_projects/Simple Agent.json +2 -2
  38. langflow/initial_setup/starter_projects/Social Media Agent.json +5 -5
  39. langflow/initial_setup/starter_projects/Text Sentiment Analysis.json +3 -3
  40. langflow/initial_setup/starter_projects/Travel Planning Agents.json +1 -1
  41. langflow/initial_setup/starter_projects/Twitter Thread Generator.json +1 -1
  42. langflow/initial_setup/starter_projects/Vector Store RAG.json +5 -5
  43. langflow/initial_setup/starter_projects/Youtube Analysis.json +2 -2
  44. langflow/schema/openai_responses_schemas.py +74 -0
  45. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/METADATA +1 -1
  46. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/RECORD +48 -46
  47. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/WHEEL +0 -0
  48. {langflow_base_nightly-0.5.0.dev39.dist-info → langflow_base_nightly-0.5.1.dev0.dist-info}/entry_points.txt +0 -0
@@ -808,7 +808,7 @@
  "dependencies": [
  {
  "name": "langchain_text_splitters",
- "version": "0.3.8"
+ "version": "0.3.9"
  },
  {
  "name": "langflow",
@@ -1114,7 +1114,7 @@
  },
  {
  "name": "fastapi",
- "version": "0.115.13"
+ "version": "0.116.1"
  },
  {
  "name": "langflow",
@@ -2787,7 +2787,7 @@
  },
  {
  "name": "langchain_core",
- "version": "0.3.72"
+ "version": "0.3.75"
  },
  {
  "name": "langflow",
@@ -3584,7 +3584,7 @@
  },
  {
  "name": "langchain_core",
- "version": "0.3.72"
+ "version": "0.3.75"
  },
  {
  "name": "langflow",
@@ -4391,7 +4391,7 @@
  "show": true,
  "title_case": false,
  "type": "code",
- "value": "\"\"\"Enhanced file component v2 with mypy and ruff compliance.\"\"\"\n\nfrom __future__ import annotations\n\nfrom copy import deepcopy\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass MockConversionStatus(Enum):\n \"\"\"Mock ConversionStatus for fallback compatibility.\"\"\"\n\n SUCCESS = \"success\"\n FAILURE = \"failure\"\n\n\nclass MockInputFormat(Enum):\n \"\"\"Mock InputFormat for fallback compatibility.\"\"\"\n\n PDF = \"pdf\"\n IMAGE = \"image\"\n\n\nclass MockImageRefMode(Enum):\n \"\"\"Mock ImageRefMode for fallback compatibility.\"\"\"\n\n PLACEHOLDER = \"placeholder\"\n EMBEDDED = \"embedded\"\n\n\nclass DoclingImports:\n \"\"\"Container for docling imports with type information.\"\"\"\n\n def __init__(\n self,\n conversion_status: type[Enum],\n input_format: type[Enum],\n document_converter: type,\n image_ref_mode: type[Enum],\n strategy: str,\n ) -> None:\n self.conversion_status = conversion_status\n self.input_format = input_format\n self.document_converter = document_converter\n self.image_ref_mode = image_ref_mode\n self.strategy = strategy\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"Enhanced file component v2 that combines standard file loading with optional Docling processing and export.\n\n This component supports all features of the standard File component, plus an advanced mode\n that enables Docling document processing and export to various formats (Markdown, HTML, etc.).\n \"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling supported formats from original component\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n def _path_value(self, template) -> list[str]:\n # Get current path value\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Update build configuration to show/hide fields based on file count and advanced_mode.\"\"\"\n if field_name == \"path\":\n # Get current path value\n path_value = self._path_value(build_config)\n file_path = path_value[0] if len(path_value) > 0 else \"\"\n\n # Show/hide Advanced Parser based on file count (only for single files)\n file_count = len(field_value) if field_value else 0\n if file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n build_config[\"advanced_mode\"][\"show\"] = True\n else:\n build_config[\"advanced_mode\"][\"show\"] = False\n build_config[\"advanced_mode\"][\"value\"] = False # Reset to False when hidden\n\n # Hide all advanced fields when Advanced Parser is not available\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n # Show/hide advanced fields based on advanced_mode (only if single file)\n advanced_fields = [\n \"pipeline\",\n \"ocr_engine\",\n \"doc_key\",\n \"md_image_placeholder\",\n \"md_page_break_placeholder\",\n ]\n\n for field in advanced_fields:\n if field in build_config:\n build_config[field][\"show\"] = field_value\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on the number of files and their types.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n # Add outputs based on the number of files in the path\n template = frontend_node.get(\"template\", {})\n 
path_value = self._path_value(template)\n if len(path_value) == 0:\n return frontend_node\n\n # Clear existing outputs\n frontend_node[\"outputs\"] = []\n\n if len(path_value) == 1:\n # We need to check if the file is structured content\n file_path = path_value[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n # Add outputs based on advanced mode\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n\n if advanced_mode:\n # Advanced mode: Structured Output, Markdown, and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Normal mode: Raw Content and File Path\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # For multiple files, we show the files output (DataFrame format)\n # Advanced Parser is not available for multiple files\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"),\n )\n\n return frontend_node\n\n def _try_import_docling(self) -> DoclingImports | None:\n \"\"\"Try different import strategies for docling components.\"\"\"\n # Try strategy 1: Latest docling structure\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore[import-untyped]\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n self.log(\"Using latest docling import structure\")\n return DoclingImports(\n conversion_status=ConversionStatus,\n input_format=InputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"latest\",\n )\n except ImportError as e:\n self.log(f\"Latest docling structure failed: {e}\")\n\n # Try strategy 2: Alternative import paths\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n from docling_core.types.doc import ImageRefMode # type: ignore[import-untyped]\n\n # Try to get ConversionStatus from different locations\n conversion_status: type[Enum] = MockConversionStatus\n input_format: type[Enum] = MockInputFormat\n\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore[import-untyped]\n\n conversion_status = ConversionStatus\n input_format = InputFormat\n except ImportError:\n # Use mock enums if we can't find them\n pass\n\n self.log(\"Using 
alternative docling import structure\")\n return DoclingImports(\n conversion_status=conversion_status,\n input_format=input_format,\n document_converter=DocumentConverter,\n image_ref_mode=ImageRefMode,\n strategy=\"alternative\",\n )\n except ImportError as e:\n self.log(f\"Alternative docling structure failed: {e}\")\n\n # Try strategy 3: Basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore[import-untyped]\n\n self.log(\"Using basic docling import structure with mocks\")\n return DoclingImports(\n conversion_status=MockConversionStatus,\n input_format=MockInputFormat,\n document_converter=DocumentConverter,\n image_ref_mode=MockImageRefMode,\n strategy=\"basic\",\n )\n except ImportError as e:\n self.log(f\"Basic docling structure failed: {e}\")\n\n # Strategy 4: Complete fallback - return None to indicate failure\n return None\n\n def _create_advanced_converter(self, docling_imports: DoclingImports) -> Any:\n \"\"\"Create advanced converter with pipeline options if available.\"\"\"\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore[import-untyped]\n from docling.document_converter import PdfFormatOption # type: ignore[import-untyped]\n\n document_converter = docling_imports.document_converter\n input_format = docling_imports.input_format\n\n # Create basic pipeline options\n pipeline_options = PdfPipelineOptions()\n\n # Configure OCR if specified and available\n if self.ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore[import-untyped]\n\n pipeline_options.do_ocr = True\n ocr_factory = get_ocr_factory(allow_external_plugins=False)\n ocr_options = ocr_factory.create_options(kind=self.ocr_engine)\n pipeline_options.ocr_options = ocr_options\n self.log(f\"Configured OCR with engine: {self.ocr_engine}\")\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not configure OCR: {e}, proceeding without OCR\")\n pipeline_options.do_ocr = False\n\n # Create format options\n pdf_format_option = PdfFormatOption(pipeline_options=pipeline_options)\n format_options = {}\n if hasattr(input_format, \"PDF\"):\n format_options[input_format.PDF] = pdf_format_option\n if hasattr(input_format, \"IMAGE\"):\n format_options[input_format.IMAGE] = pdf_format_option\n\n return document_converter(format_options=format_options)\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Could not create advanced converter: {e}, using basic converter\")\n return docling_imports.document_converter()\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Check if file is compatible with Docling processing.\"\"\"\n # All VALID_EXTENSIONS are Docling compatible (except for TEXT_FILE_TYPES which may overlap)\n docling_extensions = [\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n ]\n return any(file_path.lower().endswith(ext) for ext in docling_extensions)\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process files using standard parsing or Docling based on advanced_mode and file type.\"\"\"\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | 
None:\n \"\"\"Process a single file using standard text parsing.\"\"\"\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n msg = f\"File not found: {file_path}. Error: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n except Exception as e:\n msg = f\"Unexpected error processing {file_path}: {e}\"\n self.log(msg)\n if not silent_errors:\n raise\n return None\n\n def process_file_docling(file_path: str, *, silent_errors: bool = False) -> Data | None:\n \"\"\"Process a single file using Docling if compatible, otherwise standard processing.\"\"\"\n # Try Docling first if file is compatible and advanced mode is enabled\n try:\n return self._process_with_docling_and_export(file_path)\n except Exception as e: # noqa: BLE001\n self.log(f\"Docling processing failed for {file_path}: {e}, falling back to standard processing\")\n if not silent_errors:\n # Return error data instead of raising\n return Data(data={\"error\": f\"Docling processing failed: {e}\", \"file_path\": file_path})\n\n return None\n\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n processed_data = process_file_docling(file_path)\n if not processed_data:\n msg = f\"Failed to process file with Docling: {file_path}\"\n raise ValueError(msg)\n\n # Serialize processed data to match Data structure\n serialized_data = processed_data.serialize_model()\n\n # Now, if doc is nested, we need to unravel it\n clean_data: list[Data | None] = [processed_data]\n\n # This is where we've manually processed the data\n try:\n if \"exported_content\" not in serialized_data:\n clean_data = [\n Data(\n data={\n \"file_path\": file_path,\n **(\n item[\"element\"]\n if \"element\" in item\n else {k: v for k, v in item.items() if k != \"file_path\"}\n ),\n }\n )\n for item in serialized_data[\"doc\"]\n ]\n except Exception as _: # noqa: BLE001\n raise ValueError(serialized_data) from None\n\n # Repeat file_list to match the number of processed data elements\n final_data: list[Data | None] = clean_data\n return self.rollup_data(file_list, final_data)\n\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_count = len(file_list)\n\n self.log(f\"Starting parallel processing of {file_count} files with concurrency: {concurrency}.\")\n file_paths = [str(file.path) for file in file_list]\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n\n return self.rollup_data(file_list, my_data)\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n # TODO: Update\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n\n def _process_with_docling_and_export(self, file_path: str) -> Data:\n \"\"\"Process a single file with Docling and export to the specified format.\"\"\"\n # Import docling components only when needed\n docling_imports = self._try_import_docling()\n\n if docling_imports is None:\n msg = \"Docling not available for advanced processing\"\n raise ImportError(msg)\n\n conversion_status = 
docling_imports.conversion_status\n document_converter = docling_imports.document_converter\n image_ref_mode = docling_imports.image_ref_mode\n\n try:\n # Create converter based on strategy and pipeline setting\n if docling_imports.strategy == \"latest\" and self.pipeline == \"standard\":\n converter = self._create_advanced_converter(docling_imports)\n else:\n # Use basic converter for compatibility\n converter = document_converter()\n self.log(\"Using basic DocumentConverter for Docling processing\")\n\n # Process single file\n result = converter.convert(file_path)\n\n # Check if conversion was successful\n success = False\n if hasattr(result, \"status\"):\n if hasattr(conversion_status, \"SUCCESS\"):\n success = result.status == conversion_status.SUCCESS\n else:\n success = str(result.status).lower() == \"success\"\n elif hasattr(result, \"document\"):\n # If no status but has document, assume success\n success = result.document is not None\n\n if not success:\n return Data(data={\"error\": \"Docling conversion failed\", \"file_path\": file_path})\n\n if self.markdown:\n self.log(\"Exporting document to Markdown format\")\n # Export the document to the specified format\n exported_content = self._export_document(result.document, image_ref_mode)\n\n return Data(\n text=exported_content,\n data={\n \"exported_content\": exported_content,\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n },\n )\n\n return Data(\n data={\n \"doc\": self.docling_to_dataframe_simple(result.document.export_to_dict()),\n \"export_format\": self.EXPORT_FORMAT,\n \"file_path\": file_path,\n }\n )\n\n except Exception as e: # noqa: BLE001\n return Data(data={\"error\": f\"Docling processing error: {e!s}\", \"file_path\": file_path})\n\n def docling_to_dataframe_simple(self, doc):\n \"\"\"Extract all text elements into a simple DataFrame.\"\"\"\n return [\n {\n \"page_no\": text[\"prov\"][0][\"page_no\"] if text[\"prov\"] else None,\n \"label\": text[\"label\"],\n \"text\": text[\"text\"],\n \"level\": text.get(\"level\", None), # for headers\n }\n for text in doc[\"texts\"]\n ]\n\n def _export_document(self, document: Any, image_ref_mode: type[Enum]) -> str:\n \"\"\"Export document to Markdown format with placeholder images.\"\"\"\n try:\n image_mode = (\n image_ref_mode(self.IMAGE_MODE) if hasattr(image_ref_mode, self.IMAGE_MODE) else self.IMAGE_MODE\n )\n\n # Always export to Markdown since it's fixed\n return document.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n\n except Exception as e: # noqa: BLE001\n self.log(f\"Markdown export failed: {e}, using basic text export\")\n # Fallback to basic text export\n try:\n return document.export_to_text()\n except Exception: # noqa: BLE001\n return str(document)\n"
+ "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\n\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n # ---- Inputs / Outputs (kept as close to original as possible) -------------------\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if 
file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"<script>\"` and\n passing JSON config via stdin. 
The child prints a JSON result to stdout.\n \"\"\"\n if not file_path:\n return None\n\n args: dict[str, Any] = {\n \"file_path\": file_path,\n \"markdown\": bool(self.markdown),\n \"image_mode\": str(self.IMAGE_MODE),\n \"md_image_placeholder\": str(self.md_image_placeholder),\n \"md_page_break_placeholder\": str(self.md_page_break_placeholder),\n \"pipeline\": str(self.pipeline),\n \"ocr_engine\": str(self.ocr_engine) if getattr(self, \"ocr_engine\", \"\") else None,\n }\n\n # The child is a tiny, self-contained script to keep memory/state isolated.\n child_script = textwrap.dedent(\n r\"\"\"\n import json, sys\n\n def try_imports():\n # Strategy 1: latest layout\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore\n from docling.document_converter import DocumentConverter # type: ignore\n from docling_core.types.doc import ImageRefMode # type: ignore\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"latest\"\n except Exception:\n pass\n # Strategy 2: alternative layout\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore\n except Exception:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore\n except Exception:\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n try:\n from docling_core.types.doc import ImageRefMode # type: ignore\n except Exception:\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"alternative\"\n except Exception:\n pass\n # Strategy 3: basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"basic\"\n except Exception as e:\n raise ImportError(f\"Docling imports failed: {e}\") from e\n\n def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):\n if strategy == \"latest\" and pipeline == \"standard\":\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore\n from docling.document_converter import PdfFormatOption # type: ignore\n pipe = PdfPipelineOptions()\n if ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore\n pipe.do_ocr = True\n fac = get_ocr_factory(allow_external_plugins=False)\n pipe.ocr_options = fac.create_options(kind=ocr_engine)\n except Exception:\n pipe.do_ocr = False\n fmt = {}\n if hasattr(input_format, \"PDF\"):\n fmt[getattr(input_format, \"PDF\")] = PdfFormatOption(pipeline_options=pipe)\n if hasattr(input_format, \"IMAGE\"):\n fmt[getattr(input_format, \"IMAGE\")] = PdfFormatOption(pipeline_options=pipe)\n return DocumentConverter(format_options=fmt)\n except Exception:\n return DocumentConverter()\n return DocumentConverter()\n\n def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):\n try:\n mode = getattr(ImageRefMode, image_mode.upper(), image_mode)\n return document.export_to_markdown(\n image_mode=mode,\n image_placeholder=img_ph,\n page_break_placeholder=pg_ph,\n )\n except Exception:\n try:\n return document.export_to_text()\n except Exception:\n return str(document)\n\n def 
to_rows(doc_dict):\n rows = []\n for t in doc_dict.get(\"texts\", []):\n prov = t.get(\"prov\") or []\n page_no = None\n if prov and isinstance(prov, list) and isinstance(prov[0], dict):\n page_no = prov[0].get(\"page_no\")\n rows.append({\n \"page_no\": page_no,\n \"label\": t.get(\"label\"),\n \"text\": t.get(\"text\"),\n \"level\": t.get(\"level\"),\n })\n return rows\n\n def main():\n cfg = json.loads(sys.stdin.read())\n file_path = cfg[\"file_path\"]\n markdown = cfg[\"markdown\"]\n image_mode = cfg[\"image_mode\"]\n img_ph = cfg[\"md_image_placeholder\"]\n pg_ph = cfg[\"md_page_break_placeholder\"]\n pipeline = cfg[\"pipeline\"]\n ocr_engine = cfg.get(\"ocr_engine\")\n meta = {\"file_path\": file_path}\n\n try:\n ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()\n converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)\n try:\n res = converter.convert(file_path)\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling conversion error: {e}\", \"meta\": meta}))\n return\n\n ok = False\n if hasattr(res, \"status\"):\n try:\n ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == \"success\")\n except Exception:\n ok = (str(res.status).lower() == \"success\")\n if not ok and hasattr(res, \"document\"):\n ok = getattr(res, \"document\", None) is not None\n if not ok:\n print(json.dumps({\"ok\": False, \"error\": \"Docling conversion failed\", \"meta\": meta}))\n return\n\n doc = getattr(res, \"document\", None)\n if doc is None:\n print(json.dumps({\"ok\": False, \"error\": \"Docling produced no document\", \"meta\": meta}))\n return\n\n if markdown:\n text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)\n print(json.dumps({\"ok\": True, \"mode\": \"markdown\", \"text\": text, \"meta\": meta}))\n return\n\n # structured\n try:\n doc_dict = doc.export_to_dict()\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling export_to_dict failed: {e}\", \"meta\": meta}))\n return\n\n rows = to_rows(doc_dict)\n print(json.dumps({\"ok\": True, \"mode\": \"structured\", \"doc\": rows, \"meta\": meta}))\n except Exception as e:\n print(\n json.dumps({\n \"ok\": False,\n \"error\": f\"Docling processing error: {e}\",\n \"meta\": {\"file_path\": file_path},\n })\n )\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n )\n\n # Validate file_path to avoid command injection or unsafe input\n if not isinstance(args[\"file_path\"], str) or any(c in args[\"file_path\"] for c in [\";\", \"|\", \"&\", \"$\", \"`\"]):\n return Data(data={\"error\": \"Unsafe file path detected.\", \"file_path\": args[\"file_path\"]})\n\n proc = subprocess.run( # noqa: S603\n [sys.executable, \"-u\", \"-c\", child_script],\n input=json.dumps(args).encode(\"utf-8\"),\n capture_output=True,\n check=False,\n )\n\n if not proc.stdout:\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\") or \"no output from child process\"\n return Data(data={\"error\": f\"Docling subprocess error: {err_msg}\", \"file_path\": file_path})\n\n try:\n result = json.loads(proc.stdout.decode(\"utf-8\"))\n except Exception as e: # noqa: BLE001\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\")\n return Data(\n data={\"error\": f\"Invalid JSON from Docling subprocess: {e}. 
stderr={err_msg}\", \"file_path\": file_path},\n )\n\n if not result.get(\"ok\"):\n return Data(data={\"error\": result.get(\"error\", \"Unknown Docling error\"), **result.get(\"meta\", {})})\n\n meta = result.get(\"meta\", {})\n if result.get(\"mode\") == \"markdown\":\n exported_content = str(result.get(\"text\", \"\"))\n return Data(\n text=exported_content,\n data={\"exported_content\": exported_content, \"export_format\": self.EXPORT_FORMAT, **meta},\n )\n\n rows = list(result.get(\"doc\", []))\n return Data(data={\"doc\": rows, \"export_format\": self.EXPORT_FORMAT, **meta})\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process input files.\n\n - Single file + advanced_mode => Docling in a separate process.\n - Otherwise => standard parsing in current process (optionally threaded).\n \"\"\"\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n self.log(f\"File not found: {file_path}. Error: {e}\")\n if not silent_errors:\n raise\n return None\n except Exception as e:\n self.log(f\"Unexpected error processing {file_path}: {e}\")\n if not silent_errors:\n raise\n return None\n\n # Advanced path: only for a single Docling-compatible file\n if len(file_list) == 1:\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n advanced_data: Data | None = self._process_docling_in_subprocess(file_path)\n\n # --- UNNEST: expand each element in `doc` to its own Data row\n payload = getattr(advanced_data, \"data\", {}) or {}\n doc_rows = payload.get(\"doc\")\n if isinstance(doc_rows, list):\n rows: list[Data | None] = [\n Data(\n data={\n \"file_path\": file_path,\n **(item if isinstance(item, dict) else {\"value\": item}),\n },\n )\n for item in doc_rows\n ]\n return self.rollup_data(file_list, rows)\n\n # If not structured, keep as-is (e.g., markdown export or error dict)\n return self.rollup_data(file_list, [advanced_data])\n\n # Standard multi-file (or single non-advanced) path\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_paths = [str(f.path) for f in file_list]\n self.log(f\"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.\")\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n return self.rollup_data(file_list, my_data)\n\n # ------------------------------ Output helpers -----------------------------------\n\n def load_files_advanced(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to an advanced format.\"\"\"\n self.markdown = False\n return self.load_files()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files()\n return Message(text=str(result.text[0]))\n"
  },
  "concurrency_multithreading": {
  "_input_type": "IntInput",
@@ -298,7 +298,7 @@
  },
  {
  "name": "langchain_core",
- "version": "0.3.72"
+ "version": "0.3.75"
  }
  ],
  "total_dependencies": 3
@@ -1482,7 +1482,7 @@
  },
  {
  "name": "fastapi",
- "version": "0.115.13"
+ "version": "0.116.1"
  },
  {
  "name": "langflow",
@@ -0,0 +1,74 @@
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field
+
+
+ class OpenAIResponsesRequest(BaseModel):
+     """OpenAI-compatible responses request with flow_id as model parameter."""
+
+     model: str = Field(..., description="The flow ID to execute (used instead of OpenAI model)")
+     input: str = Field(..., description="The input text to process")
+     stream: bool = Field(default=False, description="Whether to stream the response")
+     background: bool = Field(default=False, description="Whether to process in background")
+     tools: list[Any] | None = Field(default=None, description="Tools are not supported yet")
+     previous_response_id: str | None = Field(
+         default=None, description="ID of previous response to continue conversation"
+     )
+     include: list[str] | None = Field(
+         default=None, description="Additional response data to include, e.g., ['tool_call.results']"
+     )
+
+
+ class OpenAIResponsesResponse(BaseModel):
+     """OpenAI-compatible responses response."""
+
+     id: str
+     object: Literal["response"] = "response"
+     created_at: int
+     status: Literal["completed", "in_progress", "failed"] = "completed"
+     error: dict | None = None
+     incomplete_details: dict | None = None
+     instructions: str | None = None
+     max_output_tokens: int | None = None
+     model: str
+     output: list[dict]
+     parallel_tool_calls: bool = True
+     previous_response_id: str | None = None
+     reasoning: dict = Field(default_factory=lambda: {"effort": None, "summary": None})
+     store: bool = True
+     temperature: float = 1.0
+     text: dict = Field(default_factory=lambda: {"format": {"type": "text"}})
+     tool_choice: str = "auto"
+     tools: list[dict] = Field(default_factory=list)
+     top_p: float = 1.0
+     truncation: str = "disabled"
+     usage: dict | None = None
+     user: str | None = None
+     metadata: dict = Field(default_factory=dict)
+
+
+ class OpenAIResponsesStreamChunk(BaseModel):
+     """OpenAI-compatible responses stream chunk."""
+
+     id: str
+     object: Literal["response.chunk"] = "response.chunk"
+     created: int
+     model: str
+     delta: dict
+     status: Literal["completed", "in_progress", "failed"] | None = None
+
+
+ class OpenAIErrorResponse(BaseModel):
+     error: dict = Field(..., description="Error details")
+
+
+ def create_openai_error(message: str, type_: str = "invalid_request_error", code: str | None = None) -> dict:
+     """Create an OpenAI-compatible error response."""
+     error_data = {
+         "message": message,
+         "type": type_,
+     }
+     if code:
+         error_data["code"] = code
+
+     return {"error": error_data}
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langflow-base-nightly
- Version: 0.5.0.dev39
+ Version: 0.5.1.dev0
  Summary: A Python package with a built-in web application
  Project-URL: Repository, https://github.com/langflow-ai/langflow
  Project-URL: Documentation, https://docs.langflow.org
@@ -68,15 +68,15 @@ langflow/api/disconnect.py,sha256=tXLWfMs_UHh2nuNTDAV956R1K8gnVJOQDyh2J5RkFp8,10
  langflow/api/health_check_router.py,sha256=9ZXYLKAQaeALD919ctc_EaKfY3UydE6xNkHG1pF9gQA,2224
  langflow/api/limited_background_tasks.py,sha256=VlDAm9vk-C9xyCAtVi0QhvLrFPXqXB6jhGgFjLSvv2s,1318
  langflow/api/log_router.py,sha256=Wd1EZ0wQxwCrs618Ih554vZkUuEpE0Taf-VfU6e039w,3795
- langflow/api/router.py,sha256=DptUmB3zDGLpu2fqi_4BtdKYdB-Vwx9HGL39u0VyVis,1651
+ langflow/api/router.py,sha256=tA1TLe4XzCcoyCxgu-JMafeafJGpVLYnLJkO0xZElLo,1730
  langflow/api/schemas.py,sha256=OJkHZcoDp_yHKh-4ZV-_EI5H8XbCOpxMLfybt5J7LYs,246
  langflow/api/utils.py,sha256=N051HaNsCgxr4kA4EjubyeqJUM56yna-m7hscuQ6tSs,14505
- langflow/api/v1/__init__.py,sha256=CsmlxhnafAsUcMzrwpL6kVKEewsQ8QEutluWlf2Uzp8,1550
+ langflow/api/v1/__init__.py,sha256=ZB_JlAtmnLULRVzyiXlXjgVljWYVUk1dok0doGSnAoA,1660
  langflow/api/v1/api_key.py,sha256=J3nJpRlEk25As3tCfX7A_PYNOcKdjxXGkFsMcY9A2HM,2812
  langflow/api/v1/base.py,sha256=Ex31PhkQHUHCb5tWaCfrTvsUUPxBV8JGCNsfOgLxxzE,1292
  langflow/api/v1/callback.py,sha256=8dTZdw-PMyJNCQm2xr76s7loX-ecP1D1p_YT-W6ScLw,5077
  langflow/api/v1/chat.py,sha256=GrN-t96ult3ynL0Rd_FgOfgWKNHcE9bcLvzGTjxcDRo,25240
- langflow/api/v1/endpoints.py,sha256=ELAK6z5nnopik5tEQaT5q0qLr3I7x9SedQCsDFplhEw,30625
+ langflow/api/v1/endpoints.py,sha256=yTYbtWyvMCkupcY4fr4zbdiYx9uAN7iLVh15f33Qy7I,30827
  langflow/api/v1/files.py,sha256=UxYrE1VFw5_VRiig3N3mC9ouTIQ-51qHOjiXYulXyNQ,7666
  langflow/api/v1/flows.py,sha256=F-RKSRjz7PrXrjsFDOceRLurIfDaHbvDL8x5skrJ3-8,21434
  langflow/api/v1/folders.py,sha256=uk8O2w-8d0jGb8TIJEz3mO09Ib1yTzGhmyW54MKdpqA,3286
@@ -86,6 +86,7 @@ langflow/api/v1/mcp.py,sha256=1e13_sg0qvT6FVE2V79K2Zhv7cFYyCk6TYQlV2ONxdY,4776
  langflow/api/v1/mcp_projects.py,sha256=WI1HoWTaTyYq6JD93atAzbNIsx7DQoYDl8j0kyIhWk0,45888
  langflow/api/v1/mcp_utils.py,sha256=nvjRwrbvQfnBZbrSXMWryeJEbkJ8DQK-SlrFgbsEotU,13804
  langflow/api/v1/monitor.py,sha256=2vMUdIdzbIaFdyVvr5RrXQvNQqLNLcHnlGwzdkqkiI4,7567
+ langflow/api/v1/openai_responses.py,sha256=641RzIVVlBsyDXDVqr7mKsqxMh8xgZSp0v67sJdWzWA,26617
  langflow/api/v1/projects.py,sha256=CkOYAkycZfYCNWCPJQr_0wlNfSvIq82EGO95lvFjBm4,13732
  langflow/api/v1/schemas.py,sha256=cQV4TjMMO4sRvcWlQws8x10p93Z5Z8nRra01Chbynbk,14438
  langflow/api/v1/starter_projects.py,sha256=a7nF3Z_O-R5gGqHDpQnvG6zN8Y22FX7Xtx48kXX4EvY,637
@@ -277,7 +278,7 @@ langflow/components/data/__init__.py,sha256=oFZ21yDYiUERdp1KbM4_vwdmCvgaR80Tsmms
  langflow/components/data/api_request.py,sha256=pkitJvImjm2oufnupZFm-3NVnHCeBAQybKQQz5r0zN8,19777
  langflow/components/data/csv_to_data.py,sha256=FL99gVyquYmdrD6Z1S0X_l3DDkVDRfdCwzdwjc-LQ4g,3328
  langflow/components/data/directory.py,sha256=MqSUyq5cL6Xy2CqBREc0hJlcoega3r82ti29oNmGlog,3966
- langflow/components/data/file.py,sha256=hixxblUUIM6eCDlHmcNHeA78qNQ4WXTMRK3mzJuvvlE,25373
+ langflow/components/data/file.py,sha256=OtH01SPt0FiZWhZ4wbMq7gDirxh4oMSBXWvrTf54B1A,24146
  langflow/components/data/json_to_data.py,sha256=uN3yyVHo-DOvv0ZwYQx99V-rWddh3A6iDBKW7ga1J4c,3554
  langflow/components/data/kb_ingest.py,sha256=bGIGPywJzaxpDglxs_z1C1cpw8M0yL-nPGTDQSz_2Lc,27597
  langflow/components/data/kb_retrieval.py,sha256=b8-GvhrKDKXqqRAYLNLNgnrsks0gqYpRLdZYDgEPTms,10120
@@ -331,7 +332,7 @@ langflow/components/deepseek/__init__.py,sha256=KJHElyBgRcJIGoejJV-MSwSQ-fz3ZbVC
  langflow/components/deepseek/deepseek.py,sha256=VJo3tfF8lOoAmhzzv3CeRUFb2zDZG18-BokpAR-GMYU,4717
  langflow/components/docling/__init__.py,sha256=MxEEA_bDfWEQ630yj-K0cwkRNd9UVFDKeEJ9HXhRghg,1429
  langflow/components/docling/chunk_docling_document.py,sha256=Y8JVHza3uUiANkpoGkmvULKG47So7R_0h5ogtc3KA4E,7620
- langflow/components/docling/docling_inline.py,sha256=FHWeGWIPSuh0K5dSBq11xDTdOkxPTkxckpyHNIhBdF4,6085
+ langflow/components/docling/docling_inline.py,sha256=HkqQ6YguZqAnGpb3ryzqiZVsl-yTSYMVjF8ssv1pJKk,8353
  langflow/components/docling/docling_remote.py,sha256=iAU4hgQxklYr_3OECuoXI08iQ_MvJ22JC2LrGVU0vwQ,6810
  langflow/components/docling/export_docling_document.py,sha256=RRyWc71MpzlI7Tx1mW4XMn9lKkQCqlIh_fPCkeAAbpE,4701
  langflow/components/documentloaders/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -469,7 +470,7 @@ langflow/components/novita/novita.py,sha256=b7MPBa9UeEpOxe-UuHkdXAn1FiAPwrzTwkPS
  langflow/components/nvidia/__init__.py,sha256=8mOEelaSwLNxjn7hMgJCOUiA1NScO32Wu8Y7SpmrmZk,1761
  langflow/components/nvidia/nvidia.py,sha256=x_yWQfnVw__nDQFmQ0rFlC3ArY8uZHUFFL5Eq7p7gtA,6366
  langflow/components/nvidia/nvidia_embedding.py,sha256=2mr_l3cqPpDqidrscjBTPLv2HG21JD-0Oj6cuQS-X4g,2683
- langflow/components/nvidia/nvidia_ingest.py,sha256=7Qt3hrKRqzM5P5TQBZ3M9iRV5Ka3c3HDx0R-o_Y_8aw,12063
+ langflow/components/nvidia/nvidia_ingest.py,sha256=C1Ex7_fWA4h3QzMRSbQV4faBRxsAjMzGB9NqrZlriNE,12070
  langflow/components/nvidia/nvidia_rerank.py,sha256=OuG0jPa7jHe0WXOfSfhu5IOvAwdKcO5KHSepVYo2YI0,2323
  langflow/components/nvidia/system_assist.py,sha256=EJCcjXjs_9_DgkYEs2iYBZQchJgiSAKyKfWShVQk4a8,2493
  langflow/components/olivya/__init__.py,sha256=ilZR88huL3vnQHO27g4jsUkyIYSgN7RPOq8Corbi6xA,67
@@ -589,7 +590,7 @@ langflow/components/xai/xai.py,sha256=k-ceZUzCRb8WTRaz9ZoGYL9lWp8iW2OFUNsfYZmRi0
  langflow/components/yahoosearch/__init__.py,sha256=SvvC_MIzQOoz2XRHcNia_I03Ut3RrDUgUSng-QJe7V4,70
  langflow/components/yahoosearch/yahoo.py,sha256=bmHtWtgbHpgH6bHlSuXE4adidIewhcg6eIfJLBuR80Q,4896
  langflow/components/youtube/__init__.py,sha256=jnfOjsP7JO-zAdTMnawC5uN38Rxg0AFefeY0Gv5x-ug,1745
- langflow/components/youtube/channel.py,sha256=5kQXaJKuLxRhGK50spvyvU6mLWe1_1m-u7oMPBvWKLA,9042
+ langflow/components/youtube/channel.py,sha256=_h4hBGqWB4mnEbi6WqlndtiHSDg2xmTY2riTdrh_bFs,9031
  langflow/components/youtube/comments.py,sha256=rtopdfSqDYp6khMTy84GhnRRGWg5a-rvfwJaIdW9FUQ,8437
  langflow/components/youtube/playlist.py,sha256=uHnI01iJSBXoUdyzOoFnUseu3qGAnZUnkSyh98KXr3c,1124
  langflow/components/youtube/search.py,sha256=ormny6gn3l0CN1Nze2Y6Bg3Dd6OazmLE89mI2Dsk4EE,4488
@@ -614,7 +615,7 @@ langflow/custom/custom_component/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
  langflow/custom/custom_component/base_component.py,sha256=zbuFjNMAZAMBQzdFgnG6G6IrM_MLyNcDfQsM-0zkf5w,3812
  langflow/custom/custom_component/component.py,sha256=sNTb3B7avCa9-SETDzLeEkbiHodFBmNhzgClnZc1GUg,72697
  langflow/custom/custom_component/component_with_cache.py,sha256=DU76C-qQjBLIc5PRv4aU_D1pU5OJNmPkxL6lTww5B_c,323
- langflow/custom/custom_component/custom_component.py,sha256=I35-wAv7AGcbgdPSufraEfOJgUhnZ38CAXOlLQ-bJ5U,20587
+ langflow/custom/custom_component/custom_component.py,sha256=pT3OWEcCe8unj-1G7wzq6iKntf8qCUxsxWkI3YPAgrM,21144
  langflow/custom/directory_reader/__init__.py,sha256=eFjlhKjpt2Kha_sJ2EqWofLRbpvfOTjvDSCpdpaTqWk,77
  langflow/custom/directory_reader/directory_reader.py,sha256=UEY8kepNw5jA8fzcFpIhx4FWlYGcok2RuwdY5X0II9c,15288
  langflow/custom/directory_reader/utils.py,sha256=jbG4zAaR04LsqWTu90MaQ0TkXH-5OMQ_D5ZQZKWAMJo,6551
@@ -786,7 +787,7 @@ langflow/graph/edge/schema.py,sha256=vw5SVl7EelsGfUYkvGYzzoiBGGk00ztaLyH_N07hAxA
  langflow/graph/edge/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langflow/graph/graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langflow/graph/graph/ascii.py,sha256=-jYWI_Zmz24mfLOxJrzIINZ0dbQgd9PXJfqwZa03xas,6211
- langflow/graph/graph/base.py,sha256=-bLOPIaHRNW_5VD-xUGbc_Pj6WXConpma9cHR0kh1pM,91016
+ langflow/graph/graph/base.py,sha256=9d3vNDYSVF3unpmbgwMKJ6DiPB-lmXcNbg4D6hyGwVE,91146
  langflow/graph/graph/constants.py,sha256=y0e3-kQWz9zJkAarKIueBljDIspqIDbesn5metrPiP8,1557
  langflow/graph/graph/runnable_vertices_manager.py,sha256=c-qQP3koKyAsIADDSONiiz4FIRIn6q5kAMX6EQIBBfA,6148
  langflow/graph/graph/schema.py,sha256=pR_FlwBWOCyFBfkiWpecx_DP1BoWCGHJurT-YwkSCwo,1111
@@ -892,39 +893,39 @@ langflow/initial_setup/profile_pictures/Space/047-computer.svg,sha256=J7xvt6THPE
  langflow/initial_setup/profile_pictures/Space/048-satellite.svg,sha256=WhKM7trc3c3fBlvJ3nMukEtg4kV5Ayh8cfuVDeYqFJ4,17879
  langflow/initial_setup/profile_pictures/Space/049-astronaut.svg,sha256=0ovhZ0SR3Id8a1VfwkUzljtaYKf6i2pfNLuTr8inTDs,8671
  langflow/initial_setup/profile_pictures/Space/050-space robot.svg,sha256=QIP9tuThFuutjd1dcx-rXUi1_S9GGmMbqFkXDqg_0fw,15844
- langflow/initial_setup/starter_projects/Basic Prompt Chaining.json,sha256=-_wzRSzZBpvagQJFLn8O_4pK8thR4bVzTGBoAKpsp2s,115602
- langflow/initial_setup/starter_projects/Basic Prompting.json,sha256=bbX0X85FW67KyotrrxxAsHQzm_70tGEvNX9EixFslb0,62327
- langflow/initial_setup/starter_projects/Blog Writer.json,sha256=T_e3qNu-OQUo1GGftsU_Jo5GBnxsn642nM_b5OTykes,92543
- langflow/initial_setup/starter_projects/Custom Component Generator.json,sha256=u9_rJvlDKdzp1JnCk-erQzzQO71EYDM9Vv_dNcioLRs,168424
- langflow/initial_setup/starter_projects/Document Q&A.json,sha256=UUgN_s2wxwzXSjRZB5w6BgbmlNUMg2Y7eiUCg3UZtFo,99957
- langflow/initial_setup/starter_projects/Financial Report Parser.json,sha256=avq_HP1YOYf_bxJmfHMYr_zVBDgFzRcrwqh-YSiZjPk,90848
- langflow/initial_setup/starter_projects/Hybrid Search RAG.json,sha256=XaojogOTGk9MB01N7IymZTkegOGhviaHmYv6FKTZVFQ,208763
- langflow/initial_setup/starter_projects/Image Sentiment Analysis.json,sha256=vcWX3XI_3gPwgkPmyHezTVvynlAA4_bw8Qn3ECIKS8U,114790
- langflow/initial_setup/starter_projects/Instagram Copywriter.json,sha256=hvSu9c13veyTJ4scmprCjdmKO8vhMURmV9sLGwAyyyo,181503
- langflow/initial_setup/starter_projects/Invoice Summarizer.json,sha256=Bc8zU_T0jUtDSidZN4QY2mVNcyLDjoeVUyZ4cY17pXs,107084
- langflow/initial_setup/starter_projects/Knowledge Ingestion.json,sha256=f2N5OlM3XryvonUifuemvSARMgDeBlhGlWI0F0vSquc,86850
- langflow/initial_setup/starter_projects/Knowledge Retrieval.json,sha256=eJDvjvuA23r8RjDITW5SlUWiTFWIuIjl1ilQyBJTXYI,45557
- langflow/initial_setup/starter_projects/Market Research.json,sha256=k4ie4ic6TigmJDvtTd3ZkJtsyyv23hFOhV2h2Uh_3m8,163224
- langflow/initial_setup/starter_projects/Meeting Summary.json,sha256=GO8TcgMyWPqyWzI8R8hhCU-f5rhtYJDnyUq8w6O3kOo,198657
- langflow/initial_setup/starter_projects/Memory Chatbot.json,sha256=hOVfEjsgi31FhgrTe1fm-xNnzAhxle7BhVbbHZHBKbw,86466
- langflow/initial_setup/starter_projects/News Aggregator.json,sha256=a9izGTtRv0EIVKVs7DCF1NVmS8-pFQeAEGiAhHzaezk,125315
- langflow/initial_setup/starter_projects/Nvidia Remix.json,sha256=ciG3g4P77-0FpT8tBoK2suQ5w8YsAvbgQCOrgJ0mHSE,327225
- langflow/initial_setup/starter_projects/Pokédex Agent.json,sha256=wECF5Fp0eOEkxJbq-CBtPQ20APN7kK4Xo2juGCRnEqI,123557
- langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json,sha256=V1ZWliA91GrjdtK69XUARxeMxRZI9tASWadsDii5RRg,145355
- langflow/initial_setup/starter_projects/Price Deal Finder.json,sha256=BXJNc_B6nBfh-MHpCXtOhm-LX8a30PGT2aWoPYd4O08,134371
- langflow/initial_setup/starter_projects/Research Agent.json,sha256=51nywiUsR0bZaHIHOZiH8znkIT0lG89NmL0_0I7ZLEw,180741
- langflow/initial_setup/starter_projects/Research Translation Loop.json,sha256=JIXgGglcUlNHBwBTU459TcuOhf4QH-id6HfleuqVC8A,103408
- langflow/initial_setup/starter_projects/SEO Keyword Generator.json,sha256=fh0LBiC1yzcF0dQkij6FZ5AYPqd0bv1zl23xbmtmzIo,64883
- langflow/initial_setup/starter_projects/SaaS Pricing.json,sha256=3-96k_1Z77NcfXOEC0zqd3TTKrIPHBATZwdDMqhbHpo,93789
- langflow/initial_setup/starter_projects/Search agent.json,sha256=vVi10xBBKjY-Hg8SGvUtMBTn0ygADetyYlncNKz-HgY,94503
- langflow/initial_setup/starter_projects/Sequential Tasks Agents.json,sha256=ZGLNTerMQMC07MCOXPVRkAXX02p_PgZ8yn4GRd8oPbk,268185
- langflow/initial_setup/starter_projects/Simple Agent.json,sha256=Vh4owNXT8Gy3ovmtW0Ul7uFEAd4k0LXfu8jzE3grF9I,122723
- langflow/initial_setup/starter_projects/Social Media Agent.json,sha256=lHobMLDXYFSv0mkBbxbb6ABvF_nkW-Abol8Zyeoox48,132463
- langflow/initial_setup/starter_projects/Text Sentiment Analysis.json,sha256=glPIVAHa7lw3llmPC4Ucfn1ApXK8zvpmI2ZpIrKSdMg,164665
- langflow/initial_setup/starter_projects/Travel Planning Agents.json,sha256=JZQHEMoDmxVzQF9hMqzgaoqQgeRpbfzQzfRzotjhhxA,230285
- langflow/initial_setup/starter_projects/Twitter Thread Generator.json,sha256=-x7hM1_PaIzbmFDx4wjQyIogEelOazCoMEjvACs3lpY,105525
- langflow/initial_setup/starter_projects/Vector Store RAG.json,sha256=ChMu1ZIJs9mopIsj7ikut-0iy9-QX1ATiPCn0BziKZI,359886
- langflow/initial_setup/starter_projects/Youtube Analysis.json,sha256=KIsg_pV5ILa0xuS6dJCKMQdsRokB76dpnZ-hJEF1j10,180286
+ langflow/initial_setup/starter_projects/Basic Prompt Chaining.json,sha256=NknyVXhrfYksNX9_uvM4v2IlD3SIVb0HZVwSVUYdLsw,115601
+ langflow/initial_setup/starter_projects/Basic Prompting.json,sha256=ypM9IVqvKzi6gw_8BkBnf_D8NCTe2776-X4fIzxJZVY,62326
+ langflow/initial_setup/starter_projects/Blog Writer.json,sha256=m6qwScYwwY3B27j3uDyPV0vby0yrGaolwmzNLBrJr48,92542
+ langflow/initial_setup/starter_projects/Custom Component Generator.json,sha256=axqTaprDdRhbE13CVfzRhRGo7-oURkxmm0baoBYGA2w,168423
+ langflow/initial_setup/starter_projects/Document Q&A.json,sha256=tgBs3aluwfXf4vcf8LMt89-05wncrv_OsjdMxFEbr-U,98737
+ langflow/initial_setup/starter_projects/Financial Report Parser.json,sha256=dJbkKz31boByZlSIQOnKeIaFMAovbOKnD1OkXZa4NL8,90847
+ langflow/initial_setup/starter_projects/Hybrid Search RAG.json,sha256=aLraDhaoRwbjZreca5ZBoS7talEtx1SxEmRzIO9sMOo,208762
+ langflow/initial_setup/starter_projects/Image Sentiment Analysis.json,sha256=sT57DPS834QYc1uYClGvHlLpV4SjKRmiE7zIS_fhmS8,114789
+ langflow/initial_setup/starter_projects/Instagram Copywriter.json,sha256=k1NqyWzXBIb_HmjUthZgyurl4Lp9q9dMLNtfWYrKGAE,181502
+ langflow/initial_setup/starter_projects/Invoice Summarizer.json,sha256=KcuGQFvVyuYTDnr5APPG4grnci_5y3W9ixodbspWfaM,107083
+ langflow/initial_setup/starter_projects/Knowledge Ingestion.json,sha256=sdgAf0fRg5j6F3jsTr9oz2sH9mIbIY5iS4VSHGa-TwI,86850
+ langflow/initial_setup/starter_projects/Knowledge Retrieval.json,sha256=vPr-EeJDieWcsMKtLgJ8uNtOwfljILOaeC7oxTU6K7s,45556
+ langflow/initial_setup/starter_projects/Market Research.json,sha256=h_whSp6PP18wBq7Ilck9zujp4QNeMDLbT2Q8_ua4n0k,163223
+ langflow/initial_setup/starter_projects/Meeting Summary.json,sha256=yPNDbxZTlFR2bsPsowAYWsmWeLg5PjGEASQJxZxtl5Q,198654
+ langflow/initial_setup/starter_projects/Memory Chatbot.json,sha256=LFnunBM09jf1sNN0LsDLTd1meDBkmB7SiW1G-XVp3Ag,86465
+ langflow/initial_setup/starter_projects/News Aggregator.json,sha256=5Azo1MnO9uWRUlvKaUeqa-LZV26gSxOpGXtzy3CaE1M,125313
+ langflow/initial_setup/starter_projects/Nvidia Remix.json,sha256=A8bjwy098mZPM3OEfh_rAWa4-J80dDzTaHiuyfVhARE,327224
+ langflow/initial_setup/starter_projects/Pokédex Agent.json,sha256=3Es_4yG8SXyaOD5bpWcXpEcITe-CZzzsfJX39uxvxHU,123556
+ langflow/initial_setup/starter_projects/Portfolio Website Code Generator.json,sha256=-tFde-WSPpeazV6b_XSPwpx1-SzA5YOtDkJbJ8eJXFM,144135
+ langflow/initial_setup/starter_projects/Price Deal Finder.json,sha256=kNHF7cSWahOxE3pHqpZzcxeKkfCuF6yygo8eLF7x7uk,134370
+ langflow/initial_setup/starter_projects/Research Agent.json,sha256=3X5OEtarSV4nsBXRdWG1KriQM83JoTJHFLilt9BiPaw,180740
+ langflow/initial_setup/starter_projects/Research Translation Loop.json,sha256=bbwJTHvL2MJSt257q3rcSrX1FyQAe80SVNfHGe-9A4M,103407
+ langflow/initial_setup/starter_projects/SEO Keyword Generator.json,sha256=w_z0GkgvS01WZ2nB0rrnZprLowqoEfOFMnA5nv6Ht1M,64882
+ langflow/initial_setup/starter_projects/SaaS Pricing.json,sha256=Nx580ShQcRatE7LXOnxvhyMc7dWj6HsHIINBZ-ydTcs,93788
+ langflow/initial_setup/starter_projects/Search agent.json,sha256=6nbbb3xapsdE41NdmWB_5OS6y49QUt16LO0J1ShlgNU,94502
+ langflow/initial_setup/starter_projects/Sequential Tasks Agents.json,sha256=bu9-5qMm7R8BfZUHdKxDlRAQGrKGTveKVlAiyL9SB3o,268184
+ langflow/initial_setup/starter_projects/Simple Agent.json,sha256=Bg6GPbgvZlJG0SviBUxRfyMEDofipxhbzgCWGyi34cw,122722
+ langflow/initial_setup/starter_projects/Social Media Agent.json,sha256=CdhsCQSsTIa9iMGtIKHnNV7VxLB-Xja7MXZ9bsjFs5c,132460
+ langflow/initial_setup/starter_projects/Text Sentiment Analysis.json,sha256=mv8_9H22s5wzc8hpntXCKt6b5JwfJiSxpxsqlNirgAw,163444
+ langflow/initial_setup/starter_projects/Travel Planning Agents.json,sha256=vcpKH-mgTD-pi5uMHHTeZw8Z2vVfIPIuVHojGdr4PG4,230284
+ langflow/initial_setup/starter_projects/Twitter Thread Generator.json,sha256=Cq_ewViB4iMYbSCoqnnwb-5y9VEtA71LbBi1wGQiOu8,105524
+ langflow/initial_setup/starter_projects/Vector Store RAG.json,sha256=JWsH4qeL5tO5BC6K8ubo0Qy-qG4ufCP16MtEuBLNnwA,358666
+ langflow/initial_setup/starter_projects/Youtube Analysis.json,sha256=rvzoUJA22ncP6QjhgW5KLYxr83vXc_BJyVIW1X9cCmc,180285
  langflow/initial_setup/starter_projects/__init__.py,sha256=c5Z92jvCm680uhVKycyps-BX-lEHjjPKWcsI86METUA,673
  langflow/initial_setup/starter_projects/basic_prompting.py,sha256=fR8ca83AtbYUQzqxeI6dYJyIPk6FmqRcJsMwhDHVmTE,857
  langflow/initial_setup/starter_projects/blog_writer.py,sha256=TrscGqjqq9fD163LuCVvuNkhZIvWDWsHBUb1vNMmG1E,1457
@@ -974,6 +975,7 @@ langflow/schema/graph.py,sha256=0q-xV04AtaXR6GkCVBQFzI8PXIHdzZwmE_EuHSzEe20,1318
  langflow/schema/image.py,sha256=dyjR2FpU8PAERhdeVx-Wu82eLXNB0XOdY3V9YG1X-84,1966
  langflow/schema/log.py,sha256=PlQ8mgYF-9D4ly_krkgnIMzT03JPvVRMNskGbItbJHc,1090
  langflow/schema/message.py,sha256=fJacYMTJMsox4-a77z_GhGSfoMyqtuGDbb3HMjLJEPc,17548
+ langflow/schema/openai_responses_schemas.py,sha256=drMCAlliefHfGRojBTMepPwk4DyEGh67naWvMPD10Sw,2596
  langflow/schema/playground_events.py,sha256=427nKjijIISRacTb4aGHxxM0A1Fg2yGYOemI5m39GO4,6155
  langflow/schema/properties.py,sha256=eopvV559jMPw8ahUyeVx2vfGlGeZ5M3xqQJUbpZdzfM,1274
  langflow/schema/schema.py,sha256=1lW6m0HlQDmSFqFEOMxlJMj7T39IxX2LN6WY4IyftPg,3148
@@ -1149,7 +1151,7 @@ langflow/utils/util_strings.py,sha256=Blz5lwvE7lml7nKCG9vVJ6me5VNmVtYzFXDVPHPK7v
  langflow/utils/validate.py,sha256=WnWnTSujPoBvt85Gn5bg-ujxc5JbaS6woE_smo2YePk,16369
  langflow/utils/version.py,sha256=OjSj0smls9XnPd4-LpTH9AWyUO_NAn5mncqKkkXl_fw,2840
  langflow/utils/voice_utils.py,sha256=PawxqveuaCH5ce6mF3RQwHmiBQKjusHUdbmZ2dPr_u8,3360
- langflow_base_nightly-0.5.0.dev39.dist-info/METADATA,sha256=KLA-zudjb8NYY132TX98sq6lqmellJt1E-cXi59wPbQ,4245
- langflow_base_nightly-0.5.0.dev39.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- langflow_base_nightly-0.5.0.dev39.dist-info/entry_points.txt,sha256=JvuLdXSrkeDmDdpb8M-VvFIzb84n4HmqUcIP10_EIF8,57
- langflow_base_nightly-0.5.0.dev39.dist-info/RECORD,,
+ langflow_base_nightly-0.5.1.dev0.dist-info/METADATA,sha256=Xsoq8yFkph-iC--tqSjISao76mNkZ6IBIewwVT2WLJU,4244
+ langflow_base_nightly-0.5.1.dev0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ langflow_base_nightly-0.5.1.dev0.dist-info/entry_points.txt,sha256=JvuLdXSrkeDmDdpb8M-VvFIzb84n4HmqUcIP10_EIF8,57
+ langflow_base_nightly-0.5.1.dev0.dist-info/RECORD,,