langflow-base-nightly 0.5.0.dev30__py3-none-any.whl → 0.5.0.dev31__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (142)
  1. langflow/api/router.py +2 -0
  2. langflow/api/v1/__init__.py +2 -0
  3. langflow/api/v1/knowledge_bases.py +437 -0
  4. langflow/base/data/kb_utils.py +104 -0
  5. langflow/components/data/__init__.py +4 -0
  6. langflow/components/data/kb_ingest.py +585 -0
  7. langflow/components/data/kb_retrieval.py +254 -0
  8. langflow/frontend/assets/{SlackIcon-D2PxMQjX.js → SlackIcon-Bikuxo8x.js} +1 -1
  9. langflow/frontend/assets/{Wikipedia-BNM0lBPs.js → Wikipedia-B6aCFf5-.js} +1 -1
  10. langflow/frontend/assets/{Wolfram-COQyGyeC.js → Wolfram-CekL_M-a.js} +1 -1
  11. langflow/frontend/assets/{index-CTpfN0Cy.js → index-09CVJwsY.js} +1 -1
  12. langflow/frontend/assets/{index-DWUG3nTC.js → index-1MEYR1La.js} +1 -1
  13. langflow/frontend/assets/{index-Ds9y6kEK.js → index-2vQdFIK_.js} +1 -1
  14. langflow/frontend/assets/{index-DRdKSzTn.js → index-4Tl3Nxdo.js} +1 -1
  15. langflow/frontend/assets/{index-O_vPh7iD.js → index-5G402gB8.js} +1 -1
  16. langflow/frontend/assets/{index-D15h4ir2.js → index-5hW8VleF.js} +1 -1
  17. langflow/frontend/assets/{index-BydnMWnM.js → index-6GWpsedd.js} +1 -1
  18. langflow/frontend/assets/{index-4vIU43o6.js → index-7x3wNZ-4.js} +1 -1
  19. langflow/frontend/assets/{index-DrFpyu9Z.js → index-9gkURvG2.js} +1 -1
  20. langflow/frontend/assets/{index-DRe5h2N_.js → index-AOX7bbjJ.js} +1 -1
  21. langflow/frontend/assets/{index-fJyq3ZWN.js → index-B20KmxhS.js} +1 -1
  22. langflow/frontend/assets/{index-D_sHnnuS.js → index-B2EmwqKj.js} +1 -1
  23. langflow/frontend/assets/{index-DEc_2ba8.js → index-B4AtFbkN.js} +1 -1
  24. langflow/frontend/assets/{index-D_zQiboE.js → index-B4xLpgbM.js} +1 -1
  25. langflow/frontend/assets/{index-Db8Xgs-K.js → index-B9KRIJFi.js} +1 -1
  26. langflow/frontend/assets/{index-BzCZNz2f.js → index-B9uOBe6Y.js} +1 -1
  27. langflow/frontend/assets/{index-pFTvwRsJ.js → index-BDmbsLY2.js} +1 -1
  28. langflow/frontend/assets/{index-CGef2axA.js → index-BIKbxmIh.js} +1 -1
  29. langflow/frontend/assets/{index-BTl_mLju.js → index-BIjUtp6d.js} +1 -1
  30. langflow/frontend/assets/{index-Jze67eTW.js → index-BJIsQS8D.js} +1 -1
  31. langflow/frontend/assets/{index-DV-gdr7l.js → index-BO4fl1uU.js} +1 -1
  32. langflow/frontend/assets/{index-BUVmswbg.js → index-BRE8A4Q_.js} +1 -1
  33. langflow/frontend/assets/{index-CTzWsu8S.js → index-BRNhftot.js} +1 -1
  34. langflow/frontend/assets/{index-DFYBo38q.js → index-BRizlHaN.js} +1 -1
  35. langflow/frontend/assets/{index-DbPP5vss.js → index-BRwkzs92.js} +1 -1
  36. langflow/frontend/assets/{index-BzE7oL1n.js → index-BZCt_UnJ.js} +1 -1
  37. langflow/frontend/assets/{index-BhRSkpxu.js → index-B_ytx_iA.js} +1 -1
  38. langflow/frontend/assets/{index-ByCunkn4.js → index-BcqeL_f4.js} +1 -1
  39. langflow/frontend/assets/{index-CAAZbdRp.js → index-Bgd7yLoW.js} +1 -1
  40. langflow/frontend/assets/{index-DpDbxNdQ.js → index-BlRTHXW5.js} +1 -1
  41. langflow/frontend/assets/{index-jXSPQ_JS.js → index-BllNr21U.js} +1 -1
  42. langflow/frontend/assets/{index-fpMcQS2L.js → index-Bm7a2vMS.js} +1 -1
  43. langflow/frontend/assets/{index-BFQzmLDT.js → index-Bn4HAVDG.js} +1 -1
  44. langflow/frontend/assets/{index-D8EpAMC3.js → index-BwlYjc56.js} +1 -1
  45. langflow/frontend/assets/{index-BcCN9mpu.js → index-BzCjyHto.js} +1 -1
  46. langflow/frontend/assets/{index-D6-jZ4sc.js → index-C3RZz8WE.js} +1 -1
  47. langflow/frontend/assets/{index-D66JmFlL.js → index-C69gdJqw.js} +1 -1
  48. langflow/frontend/assets/{index-pYD0BTGu.js → index-C6P0vvSP.js} +1 -1
  49. langflow/frontend/assets/{index-CIjw_ZkP.js → index-C7wDSVVH.js} +1 -1
  50. langflow/frontend/assets/{index-BCTEK38J.js → index-CAzSTGAM.js} +1 -1
  51. langflow/frontend/assets/{index-8FjgS_Vj.js → index-CEn_71Wk.js} +1 -1
  52. langflow/frontend/assets/{index-BFiCUM5l.js → index-CGVDXKtN.js} +1 -1
  53. langflow/frontend/assets/{index-BIH2K0v8.js → index-CIYzjH2y.js} +1 -1
  54. langflow/frontend/assets/{index-gM8j2Wvk.js → index-COqjpsdy.js} +1 -1
  55. langflow/frontend/assets/{index-2q8IFBNP.js → index-CP0tFKwN.js} +1 -1
  56. langflow/frontend/assets/{index-CXpZa4H9.js → index-CPIdMJkX.js} +1 -1
  57. langflow/frontend/assets/{index-B-YjnRWx.js → index-CSRizl2S.js} +1 -1
  58. langflow/frontend/assets/{index-DFo0yfS5.js → index-CUe1ivTn.js} +1 -1
  59. langflow/frontend/assets/{index-C2x5hzgY.js → index-CVphnxXi.js} +1 -1
  60. langflow/frontend/assets/{index-Bz3QnhLZ.js → index-CY6LUi4V.js} +1 -1
  61. langflow/frontend/assets/{index-Cq6gk34q.js → index-C_2G2ZqJ.js} +1 -1
  62. langflow/frontend/assets/{index-CSXUVElo.js → index-C_K6Tof7.js} +1 -1
  63. langflow/frontend/assets/{index-1D7jZ8vz.js → index-C_UkF-RJ.js} +1 -1
  64. langflow/frontend/assets/{index-BVGZcHHC.js → index-Cbwk3f-p.js} +1 -1
  65. langflow/frontend/assets/{index-kiqvo0Zi.js → index-CdwjD4IX.js} +1 -1
  66. langflow/frontend/assets/{index-BNy3Al2s.js → index-CgbINWS8.js} +1 -1
  67. langflow/frontend/assets/{index-BXJpd9hg.js → index-CglSqvB5.js} +1 -1
  68. langflow/frontend/assets/{index-D9CF_54p.js → index-CmiRgF_-.js} +1 -1
  69. langflow/frontend/assets/{index-ez1EW657.js → index-Cp7Pmn03.js} +1 -1
  70. langflow/frontend/assets/{index-aypzjPzG.js → index-Cq30cQcP.js} +1 -1
  71. langflow/frontend/assets/index-CqS7zir1.css +1 -0
  72. langflow/frontend/assets/{index-DKv0y9Dp.js → index-Cr2oy5K2.js} +1 -1
  73. langflow/frontend/assets/{index-DrfwVxtD.js → index-Crq_yhkG.js} +1 -1
  74. langflow/frontend/assets/{index-CzJzRS6i.js → index-Cs_jt3dj.js} +1 -1
  75. langflow/frontend/assets/{index-DO0mS8FQ.js → index-Cy-ZEfWh.js} +1 -1
  76. langflow/frontend/assets/{index-Q0bwuTZY.js → index-Cyk3aCmP.js} +1 -1
  77. langflow/frontend/assets/{index-DToZROdu.js → index-D-HTZ68O.js} +1 -1
  78. langflow/frontend/assets/{index-C0AEZF1v.js → index-D1RgjMON.js} +1 -1
  79. langflow/frontend/assets/{index-DilRRF2S.js → index-D29n5mus.js} +1 -1
  80. langflow/frontend/assets/{index-CKLOrtrx.js → index-D2nHdRne.js} +1 -1
  81. langflow/frontend/assets/{index-sfFDGjjd.js → index-D7Vx6mgS.js} +1 -1
  82. langflow/frontend/assets/{index-BAHhLqW9.js → index-D7nFs6oq.js} +1 -1
  83. langflow/frontend/assets/{index-C7jY4x98.js → index-DAJafn16.js} +1 -1
  84. langflow/frontend/assets/{index-BefwTGbP.js → index-DDcpxWU4.js} +1 -1
  85. langflow/frontend/assets/{index-CTZ9iXFr.js → index-DEuXrfXH.js} +1 -1
  86. langflow/frontend/assets/{index-DFfr0xSt.js → index-DF0oWRdd.js} +1 -1
  87. langflow/frontend/assets/{index-Bh5pQAZC.js → index-DI0zAExi.js} +1 -1
  88. langflow/frontend/assets/{index-CG-Suo0F.js → index-DJs6FoYC.js} +1 -1
  89. langflow/frontend/assets/{index-dvTTQhKz.js → index-DNS4La1f.js} +1 -1
  90. langflow/frontend/assets/{index-nLDaeeZg.js → index-DOI0ceS-.js} +1 -1
  91. langflow/frontend/assets/{index-DakdEtbq.js → index-DOb9c2bf.js} +1 -1
  92. langflow/frontend/assets/{index-CEVnRp4_.js → index-DS4F_Phe.js} +1 -1
  93. langflow/frontend/assets/{index-DGRg2M1l.js → index-DTJX3yQa.js} +1 -1
  94. langflow/frontend/assets/{index-BjAsd-Vo.js → index-DVV_etfW.js} +1 -1
  95. langflow/frontend/assets/{index-BrIuZD2A.js → index-DX_InNVT.js} +1 -1
  96. langflow/frontend/assets/{index-jG-zLXRN.js → index-DbmqjLy6.js} +1 -1
  97. langflow/frontend/assets/{index-DSvOFGJR.js → index-Dc0p1Oxl.js} +1 -1
  98. langflow/frontend/assets/{index-87GFtXu5.js → index-DkJCCraf.js} +1 -1
  99. langflow/frontend/assets/{index-BXidWkLM.js → index-DlMAYATX.js} +1 -1
  100. langflow/frontend/assets/{index-sbTxhltT.js → index-DmaQAn3K.js} +1 -1
  101. langflow/frontend/assets/{index-DkC5vMvx.js → index-DmvjdU1N.js} +1 -1
  102. langflow/frontend/assets/{index-CSUglByd.js → index-DnusMCK1.js} +1 -1
  103. langflow/frontend/assets/{index-DZOTHXs0.js → index-DoFlaGDx.js} +1 -1
  104. langflow/frontend/assets/{index-CZkMjaa8.js → index-DqDQk0Cu.js} +1 -1
  105. langflow/frontend/assets/{index-lc10GnwG.js → index-DrvRK4_i.js} +1 -1
  106. langflow/frontend/assets/{index-BNm-yAYc.js → index-DtCsjX48.js} +1 -1
  107. langflow/frontend/assets/{index-BeLnhfG-.js → index-Dy7ehgeV.js} +1 -1
  108. langflow/frontend/assets/{index-RGG9hk9J.js → index-Dz0r9Idb.js} +1 -1
  109. langflow/frontend/assets/{index-Bcq2yA-p.js → index-DzDNhMMW.js} +1 -1
  110. langflow/frontend/assets/{index-P3f-GeAm.js → index-FYcoJPMP.js} +1 -1
  111. langflow/frontend/assets/{index-DQwvl_Rp.js → index-Iamzh9ZT.js} +1 -1
  112. langflow/frontend/assets/{index-Cy6n8tA9.js → index-J0pvFqLk.js} +1 -1
  113. langflow/frontend/assets/{index-D1XTMye3.js → index-J98sU-1p.js} +1 -1
  114. langflow/frontend/assets/{index-BZ0rL0tK.js → index-JHCxbvlW.js} +1 -1
  115. langflow/frontend/assets/{index-DmSH63k1.js → index-KnS52ylc.js} +1 -1
  116. langflow/frontend/assets/{index-WGZ88ShH.js → index-L7FKc9QN.js} +1 -1
  117. langflow/frontend/assets/{index-BIoFnUtx.js → index-RveG4dl9.js} +1 -1
  118. langflow/frontend/assets/{index-BDdkPrzu.js → index-T2jJOG85.js} +1 -1
  119. langflow/frontend/assets/{index-2839k6WO.js → index-TRyDa01A.js} +1 -1
  120. langflow/frontend/assets/{index-DvOdMz35.js → index-U7J1YiWE.js} +1 -1
  121. langflow/frontend/assets/{index-DzUx1-Bl.js → index-UI2ws3qp.js} +1984 -1984
  122. langflow/frontend/assets/{index-8Fx5I2fx.js → index-VO-pk-Hg.js} +1 -1
  123. langflow/frontend/assets/{index-e-RKmhti.js → index-_3qag0I4.js} +1 -1
  124. langflow/frontend/assets/{index-X67tRPXo.js → index-dfaj9-hY.js} +1 -1
  125. langflow/frontend/assets/{index-CHexGuNQ.js → index-eJwu5YEi.js} +1 -1
  126. langflow/frontend/assets/{index-Dz5YIK1W.js → index-in188l0A.js} +1 -1
  127. langflow/frontend/assets/{index-CTwkLLMr.js → index-pkOi9P45.js} +1 -1
  128. langflow/frontend/assets/{index-D6BaTmee.js → index-qXcoVIRo.js} +1 -1
  129. langflow/frontend/assets/{index-euS8RcNY.js → index-xVx59Op-.js} +1 -1
  130. langflow/frontend/assets/{index-C4WueQ4k.js → index-yIh6-LZT.js} +1 -1
  131. langflow/frontend/assets/lazyIconImports-kvf_Kak2.js +2 -0
  132. langflow/frontend/assets/{use-post-add-user-CA-_peAV.js → use-post-add-user-Bt6vZvvT.js} +1 -1
  133. langflow/frontend/index.html +2 -2
  134. langflow/initial_setup/starter_projects/Knowledge Ingestion.json +1052 -0
  135. langflow/initial_setup/starter_projects/Knowledge Retrieval.json +707 -0
  136. langflow/services/settings/base.py +3 -0
  137. {langflow_base_nightly-0.5.0.dev30.dist-info → langflow_base_nightly-0.5.0.dev31.dist-info}/METADATA +2 -1
  138. {langflow_base_nightly-0.5.0.dev30.dist-info → langflow_base_nightly-0.5.0.dev31.dist-info}/RECORD +140 -134
  139. langflow/frontend/assets/index-DIcdzk44.css +0 -1
  140. langflow/frontend/assets/lazyIconImports-lnczjBhY.js +0 -2
  141. {langflow_base_nightly-0.5.0.dev30.dist-info → langflow_base_nightly-0.5.0.dev31.dist-info}/WHEEL +0 -0
  142. {langflow_base_nightly-0.5.0.dev30.dist-info → langflow_base_nightly-0.5.0.dev31.dist-info}/entry_points.txt +0 -0
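The substantive change in this release is a new knowledge-base feature: a `knowledge_bases` API router (files 1-3), `kb_utils` helpers (file 4), the `KBIngestion` and `KBRetrieval` components (files 5-7), a `knowledge_bases_dir` setting (file 136), and two starter projects (files 134-135); the frontend asset churn is just rebuilt bundles. Judging from the component code embedded in the starter-project diff below, each knowledge base is a directory under `knowledge_bases_dir` holding a Chroma collection plus an `embedding_metadata.json` and a `schema.json`. A minimal sketch of inspecting that layout outside Langflow — the root path is an assumption (the setting's default is not visible in this diff); the file names and filtering rules come from the diffed code:

import json
from pathlib import Path

# Assumption: point this at your configured knowledge_bases_dir.
KB_ROOT = Path("~/.langflow/knowledge_bases").expanduser()

def list_knowledge_bases(root: Path = KB_ROOT) -> list[str]:
    # Mirrors KBIngestionComponent._get_knowledge_bases: non-hidden subdirectories only.
    if not root.exists():
        return []
    return [d.name for d in root.iterdir() if d.is_dir() and not d.name.startswith(".")]

def read_kb_metadata(name: str, root: Path = KB_ROOT) -> dict:
    # embedding_metadata.json is written by _save_embedding_metadata in kb_ingest.py.
    meta_path = root / name / "embedding_metadata.json"
    return json.loads(meta_path.read_text()) if meta_path.exists() else {}

for kb in list_knowledge_bases():
    meta = read_kb_metadata(kb)
    print(kb, meta.get("embedding_provider"), meta.get("embedding_model"))

Note that per the metadata schema in kb_ingest.py, the stored "api_key" field is encrypted with Langflow's encrypt_api_key helper, so this read-only sketch deliberately does not touch it.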
langflow/initial_setup/starter_projects/Knowledge Ingestion.json (new file, matching entry 134 above)
@@ -0,0 +1,1052 @@
1
+ {
2
+ "data": {
3
+ "edges": [
4
+ {
5
+ "animated": false,
6
+ "className": "",
7
+ "data": {
8
+ "sourceHandle": {
9
+ "dataType": "URLComponent",
10
+ "id": "URLComponent-6JEUC",
11
+ "name": "page_results",
12
+ "output_types": [
13
+ "DataFrame"
14
+ ]
15
+ },
16
+ "targetHandle": {
17
+ "fieldName": "data_inputs",
18
+ "id": "SplitText-gvHe2",
19
+ "inputTypes": [
20
+ "Data",
21
+ "DataFrame",
22
+ "Message"
23
+ ],
24
+ "type": "other"
25
+ }
26
+ },
27
+ "id": "reactflow__edge-URLComponent-6JEUC{œdataTypeœ:œURLComponentœ,œidœ:œURLComponent-6JEUCœ,œnameœ:œpage_resultsœ,œoutput_typesœ:[œDataFrameœ]}-SplitText-gvHe2{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-gvHe2œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
28
+ "selected": false,
29
+ "source": "URLComponent-6JEUC",
30
+ "sourceHandle": "{œdataTypeœ: œURLComponentœ, œidœ: œURLComponent-6JEUCœ, œnameœ: œpage_resultsœ, œoutput_typesœ: [œDataFrameœ]}",
31
+ "target": "SplitText-gvHe2",
32
+ "targetHandle": "{œfieldNameœ: œdata_inputsœ, œidœ: œSplitText-gvHe2œ, œinputTypesœ: [œDataœ, œDataFrameœ, œMessageœ], œtypeœ: œotherœ}"
33
+ },
34
+ {
35
+ "animated": false,
36
+ "className": "",
37
+ "data": {
38
+ "sourceHandle": {
39
+ "dataType": "SplitText",
40
+ "id": "SplitText-gvHe2",
41
+ "name": "dataframe",
42
+ "output_types": [
43
+ "DataFrame"
44
+ ]
45
+ },
46
+ "targetHandle": {
47
+ "fieldName": "input_df",
48
+ "id": "KBIngestion-jj5iW",
49
+ "inputTypes": [
50
+ "DataFrame"
51
+ ],
52
+ "type": "other"
53
+ }
54
+ },
55
+ "id": "xy-edge__SplitText-gvHe2{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-gvHe2œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-KBIngestion-jj5iW{œfieldNameœ:œinput_dfœ,œidœ:œKBIngestion-jj5iWœ,œinputTypesœ:[œDataFrameœ],œtypeœ:œotherœ}",
56
+ "selected": false,
57
+ "source": "SplitText-gvHe2",
58
+ "sourceHandle": "{œdataTypeœ: œSplitTextœ, œidœ: œSplitText-gvHe2œ, œnameœ: œdataframeœ, œoutput_typesœ: [œDataFrameœ]}",
59
+ "target": "KBIngestion-jj5iW",
60
+ "targetHandle": "{œfieldNameœ: œinput_dfœ, œidœ: œKBIngestion-jj5iWœ, œinputTypesœ: [œDataFrameœ], œtypeœ: œotherœ}"
61
+ }
62
+ ],
63
+ "nodes": [
64
+ {
65
+ "data": {
66
+ "id": "SplitText-gvHe2",
67
+ "node": {
68
+ "base_classes": [
69
+ "DataFrame"
70
+ ],
71
+ "beta": false,
72
+ "conditional_paths": [],
73
+ "custom_fields": {},
74
+ "description": "Split text into chunks based on specified criteria.",
75
+ "display_name": "Split Text",
76
+ "documentation": "https://docs.langflow.org/components-processing#split-text",
77
+ "edited": false,
78
+ "field_order": [
79
+ "data_inputs",
80
+ "chunk_overlap",
81
+ "chunk_size",
82
+ "separator",
83
+ "text_key",
84
+ "keep_separator"
85
+ ],
86
+ "frozen": false,
87
+ "icon": "scissors-line-dashed",
88
+ "legacy": false,
89
+ "lf_version": "1.5.0.post1",
90
+ "metadata": {
91
+ "code_hash": "dbf2e9d2319d",
92
+ "module": "langflow.components.processing.split_text.SplitTextComponent"
93
+ },
94
+ "minimized": false,
95
+ "output_types": [],
96
+ "outputs": [
97
+ {
98
+ "allows_loop": false,
99
+ "cache": true,
100
+ "display_name": "Chunks",
101
+ "group_outputs": false,
102
+ "method": "split_text",
103
+ "name": "dataframe",
104
+ "selected": "DataFrame",
105
+ "tool_mode": true,
106
+ "types": [
107
+ "DataFrame"
108
+ ],
109
+ "value": "__UNDEFINED__"
110
+ }
111
+ ],
112
+ "pinned": false,
113
+ "template": {
114
+ "_type": "Component",
115
+ "chunk_overlap": {
116
+ "_input_type": "IntInput",
117
+ "advanced": false,
118
+ "display_name": "Chunk Overlap",
119
+ "dynamic": false,
120
+ "info": "Number of characters to overlap between chunks.",
121
+ "list": false,
122
+ "list_add_label": "Add More",
123
+ "name": "chunk_overlap",
124
+ "placeholder": "",
125
+ "required": false,
126
+ "show": true,
127
+ "title_case": false,
128
+ "tool_mode": false,
129
+ "trace_as_metadata": true,
130
+ "type": "int",
131
+ "value": 0
132
+ },
133
+ "chunk_size": {
134
+ "_input_type": "IntInput",
135
+ "advanced": false,
136
+ "display_name": "Chunk Size",
137
+ "dynamic": false,
138
+ "info": "The maximum length of each chunk. Text is first split by separator, then chunks are merged up to this size. Individual splits larger than this won't be further divided.",
139
+ "list": false,
140
+ "list_add_label": "Add More",
141
+ "name": "chunk_size",
142
+ "placeholder": "",
143
+ "required": false,
144
+ "show": true,
145
+ "title_case": false,
146
+ "tool_mode": false,
147
+ "trace_as_metadata": true,
148
+ "type": "int",
149
+ "value": 100
150
+ },
151
+ "code": {
152
+ "advanced": true,
153
+ "dynamic": true,
154
+ "fileTypes": [],
155
+ "file_path": "",
156
+ "info": "",
157
+ "list": false,
158
+ "load_from_db": false,
159
+ "multiline": true,
160
+ "name": "code",
161
+ "password": false,
162
+ "placeholder": "",
163
+ "required": true,
164
+ "show": true,
165
+ "title_case": false,
166
+ "type": "code",
167
+ "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except 
AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n"
168
+ },
169
+ "data_inputs": {
170
+ "_input_type": "HandleInput",
171
+ "advanced": false,
172
+ "display_name": "Input",
173
+ "dynamic": false,
174
+ "info": "The data with texts to split in chunks.",
175
+ "input_types": [
176
+ "Data",
177
+ "DataFrame",
178
+ "Message"
179
+ ],
180
+ "list": false,
181
+ "list_add_label": "Add More",
182
+ "name": "data_inputs",
183
+ "placeholder": "",
184
+ "required": true,
185
+ "show": true,
186
+ "title_case": false,
187
+ "trace_as_metadata": true,
188
+ "type": "other",
189
+ "value": ""
190
+ },
191
+ "keep_separator": {
192
+ "_input_type": "DropdownInput",
193
+ "advanced": true,
194
+ "combobox": false,
195
+ "dialog_inputs": {},
196
+ "display_name": "Keep Separator",
197
+ "dynamic": false,
198
+ "info": "Whether to keep the separator in the output chunks and where to place it.",
199
+ "name": "keep_separator",
200
+ "options": [
201
+ "False",
202
+ "True",
203
+ "Start",
204
+ "End"
205
+ ],
206
+ "options_metadata": [],
207
+ "placeholder": "",
208
+ "required": false,
209
+ "show": true,
210
+ "title_case": false,
211
+ "toggle": false,
212
+ "tool_mode": false,
213
+ "trace_as_metadata": true,
214
+ "type": "str",
215
+ "value": "False"
216
+ },
217
+ "separator": {
218
+ "_input_type": "MessageTextInput",
219
+ "advanced": false,
220
+ "display_name": "Separator",
221
+ "dynamic": false,
222
+ "info": "The character to split on. Use \\n for newline. Examples: \\n\\n for paragraphs, \\n for lines, . for sentences",
223
+ "input_types": [
224
+ "Message"
225
+ ],
226
+ "list": false,
227
+ "list_add_label": "Add More",
228
+ "load_from_db": false,
229
+ "name": "separator",
230
+ "placeholder": "",
231
+ "required": false,
232
+ "show": true,
233
+ "title_case": false,
234
+ "tool_mode": false,
235
+ "trace_as_input": true,
236
+ "trace_as_metadata": true,
237
+ "type": "str",
238
+ "value": "\n"
239
+ },
240
+ "text_key": {
241
+ "_input_type": "MessageTextInput",
242
+ "advanced": true,
243
+ "display_name": "Text Key",
244
+ "dynamic": false,
245
+ "info": "The key to use for the text column.",
246
+ "input_types": [
247
+ "Message"
248
+ ],
249
+ "list": false,
250
+ "list_add_label": "Add More",
251
+ "load_from_db": false,
252
+ "name": "text_key",
253
+ "placeholder": "",
254
+ "required": false,
255
+ "show": true,
256
+ "title_case": false,
257
+ "tool_mode": false,
258
+ "trace_as_input": true,
259
+ "trace_as_metadata": true,
260
+ "type": "str",
261
+ "value": "text"
262
+ }
263
+ },
264
+ "tool_mode": false
265
+ },
266
+ "showNode": true,
267
+ "type": "SplitText"
268
+ },
269
+ "dragging": false,
270
+ "id": "SplitText-gvHe2",
271
+ "measured": {
272
+ "height": 413,
273
+ "width": 320
274
+ },
275
+ "position": {
276
+ "x": 620,
277
+ "y": 69.00284194946289
278
+ },
279
+ "selected": false,
280
+ "type": "genericNode"
281
+ },
282
+ {
283
+ "data": {
284
+ "id": "note-bpWz8",
285
+ "node": {
286
+ "description": "## Knowledge Ingestion\n\nThis flow shows the basics of the creation and ingestion of knowledge bases in Langflow. Here we use the `URL` component to dynamically fetch page data from the Langflow website, split it into chunks of 100 tokens, then ingest into a Knowledge Base.\n\n1. (Optional) Change the URL or switch to a different input data source as desired.\n2. (Optional) Adjust the Chunk Size as desired.\n3. Select or Create a new knowledge base.\n4. Ensure the column you wish to Vectorize is properly reflected in the Column Configuration table.",
287
+ "display_name": "",
288
+ "documentation": "",
289
+ "template": {}
290
+ },
291
+ "type": "note"
292
+ },
293
+ "dragging": false,
294
+ "height": 401,
295
+ "id": "note-bpWz8",
296
+ "measured": {
297
+ "height": 401,
298
+ "width": 388
299
+ },
300
+ "position": {
301
+ "x": -225.94224126537597,
302
+ "y": 75.97023827444744
303
+ },
304
+ "resizing": false,
305
+ "selected": true,
306
+ "type": "noteNode",
307
+ "width": 388
308
+ },
309
+ {
310
+ "data": {
311
+ "id": "URLComponent-6JEUC",
312
+ "node": {
313
+ "base_classes": [
314
+ "DataFrame",
315
+ "Message"
316
+ ],
317
+ "beta": false,
318
+ "conditional_paths": [],
319
+ "custom_fields": {},
320
+ "description": "Fetch content from one or more web pages, following links recursively.",
321
+ "display_name": "URL",
322
+ "documentation": "https://docs.langflow.org/components-data#url",
323
+ "edited": false,
324
+ "field_order": [
325
+ "urls",
326
+ "max_depth",
327
+ "prevent_outside",
328
+ "use_async",
329
+ "format",
330
+ "timeout",
331
+ "headers",
332
+ "filter_text_html",
333
+ "continue_on_failure",
334
+ "check_response_status",
335
+ "autoset_encoding"
336
+ ],
337
+ "frozen": false,
338
+ "icon": "layout-template",
339
+ "legacy": false,
340
+ "lf_version": "1.5.0.post1",
341
+ "metadata": {
342
+ "code_hash": "a81817a7f244",
343
+ "module": "langflow.components.data.url.URLComponent"
344
+ },
345
+ "minimized": false,
346
+ "output_types": [],
347
+ "outputs": [
348
+ {
349
+ "allows_loop": false,
350
+ "cache": true,
351
+ "display_name": "Extracted Pages",
352
+ "group_outputs": false,
353
+ "method": "fetch_content",
354
+ "name": "page_results",
355
+ "selected": "DataFrame",
356
+ "tool_mode": true,
357
+ "types": [
358
+ "DataFrame"
359
+ ],
360
+ "value": "__UNDEFINED__"
361
+ },
362
+ {
363
+ "allows_loop": false,
364
+ "cache": true,
365
+ "display_name": "Raw Content",
366
+ "group_outputs": false,
367
+ "method": "fetch_content_as_message",
368
+ "name": "raw_results",
369
+ "selected": null,
370
+ "tool_mode": false,
371
+ "types": [
372
+ "Message"
373
+ ],
374
+ "value": "__UNDEFINED__"
375
+ }
376
+ ],
377
+ "pinned": false,
378
+ "template": {
379
+ "_type": "Component",
380
+ "autoset_encoding": {
381
+ "_input_type": "BoolInput",
382
+ "advanced": true,
383
+ "display_name": "Autoset Encoding",
384
+ "dynamic": false,
385
+ "info": "If enabled, automatically sets the encoding of the request.",
386
+ "list": false,
387
+ "list_add_label": "Add More",
388
+ "name": "autoset_encoding",
389
+ "placeholder": "",
390
+ "required": false,
391
+ "show": true,
392
+ "title_case": false,
393
+ "tool_mode": false,
394
+ "trace_as_metadata": true,
395
+ "type": "bool",
396
+ "value": true
397
+ },
398
+ "check_response_status": {
399
+ "_input_type": "BoolInput",
400
+ "advanced": true,
401
+ "display_name": "Check Response Status",
402
+ "dynamic": false,
403
+ "info": "If enabled, checks the response status of the request.",
404
+ "list": false,
405
+ "list_add_label": "Add More",
406
+ "name": "check_response_status",
407
+ "placeholder": "",
408
+ "required": false,
409
+ "show": true,
410
+ "title_case": false,
411
+ "tool_mode": false,
412
+ "trace_as_metadata": true,
413
+ "type": "bool",
414
+ "value": false
415
+ },
416
+ "code": {
417
+ "advanced": true,
418
+ "dynamic": true,
419
+ "fileTypes": [],
420
+ "file_path": "",
421
+ "info": "",
422
+ "list": false,
423
+ "load_from_db": false,
424
+ "multiline": true,
425
+ "name": "code",
426
+ "password": false,
427
+ "placeholder": "",
428
+ "required": true,
429
+ "show": true,
430
+ "title_case": false,
431
+ "type": "code",
432
+ "value": "import re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom langchain_community.document_loaders import RecursiveUrlLoader\nfrom loguru import logger\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.helpers.data import safe_convert\nfrom langflow.io import BoolInput, DropdownInput, IntInput, MessageTextInput, Output, SliderInput, TableInput\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.services.deps import get_settings_service\n\n# Constants\nDEFAULT_TIMEOUT = 30\nDEFAULT_MAX_DEPTH = 1\nDEFAULT_FORMAT = \"Text\"\nURL_REGEX = re.compile(\n r\"^(https?:\\/\\/)?\" r\"(www\\.)?\" r\"([a-zA-Z0-9.-]+)\" r\"(\\.[a-zA-Z]{2,})?\" r\"(:\\d+)?\" r\"(\\/[^\\s]*)?$\",\n re.IGNORECASE,\n)\n\n\nclass URLComponent(Component):\n \"\"\"A component that loads and parses content from web pages recursively.\n\n This component allows fetching content from one or more URLs, with options to:\n - Control crawl depth\n - Prevent crawling outside the root domain\n - Use async loading for better performance\n - Extract either raw HTML or clean text\n - Configure request headers and timeouts\n \"\"\"\n\n display_name = \"URL\"\n description = \"Fetch content from one or more web pages, following links recursively.\"\n documentation: str = \"https://docs.langflow.org/components-data#url\"\n icon = \"layout-template\"\n name = \"URLComponent\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n info=\"Enter one or more URLs to crawl recursively, by clicking the '+' button.\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n input_types=[],\n ),\n SliderInput(\n name=\"max_depth\",\n display_name=\"Depth\",\n info=(\n \"Controls how many 'clicks' away from the initial page the crawler will go:\\n\"\n \"- depth 1: only the initial page\\n\"\n \"- depth 2: initial page + all pages linked directly from it\\n\"\n \"- depth 3: initial page + direct links + links found on those direct link pages\\n\"\n \"Note: This is about link traversal, not URL path depth.\"\n ),\n value=DEFAULT_MAX_DEPTH,\n range_spec=RangeSpec(min=1, max=5, step=1),\n required=False,\n min_label=\" \",\n max_label=\" \",\n min_label_icon=\"None\",\n max_label_icon=\"None\",\n # slider_input=True\n ),\n BoolInput(\n name=\"prevent_outside\",\n display_name=\"Prevent Outside\",\n info=(\n \"If enabled, only crawls URLs within the same domain as the root URL. \"\n \"This helps prevent the crawler from going to external websites.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"use_async\",\n display_name=\"Use Async\",\n info=(\n \"If enabled, uses asynchronous loading which can be significantly faster \"\n \"but might use more system resources.\"\n ),\n value=True,\n required=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. 
Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.\",\n options=[\"Text\", \"HTML\"],\n value=DEFAULT_FORMAT,\n advanced=True,\n ),\n IntInput(\n name=\"timeout\",\n display_name=\"Timeout\",\n info=\"Timeout for the request in seconds.\",\n value=DEFAULT_TIMEOUT,\n required=False,\n advanced=True,\n ),\n TableInput(\n name=\"headers\",\n display_name=\"Headers\",\n info=\"The headers to send with the request\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Header\",\n \"type\": \"str\",\n \"description\": \"Header name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Header value\",\n },\n ],\n value=[{\"key\": \"User-Agent\", \"value\": get_settings_service().settings.user_agent}],\n advanced=True,\n input_types=[\"DataFrame\"],\n ),\n BoolInput(\n name=\"filter_text_html\",\n display_name=\"Filter Text/HTML\",\n info=\"If enabled, filters out text/css content type from the results.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"continue_on_failure\",\n display_name=\"Continue on Failure\",\n info=\"If enabled, continues crawling even if some requests fail.\",\n value=True,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"check_response_status\",\n display_name=\"Check Response Status\",\n info=\"If enabled, checks the response status of the request.\",\n value=False,\n required=False,\n advanced=True,\n ),\n BoolInput(\n name=\"autoset_encoding\",\n display_name=\"Autoset Encoding\",\n info=\"If enabled, automatically sets the encoding of the request.\",\n value=True,\n required=False,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Extracted Pages\", name=\"page_results\", method=\"fetch_content\"),\n Output(display_name=\"Raw Content\", name=\"raw_results\", method=\"fetch_content_as_message\", tool_mode=False),\n ]\n\n @staticmethod\n def validate_url(url: str) -> bool:\n \"\"\"Validates if the given string matches URL pattern.\n\n Args:\n url: The URL string to validate\n\n Returns:\n bool: True if the URL is valid, False otherwise\n \"\"\"\n return bool(URL_REGEX.match(url))\n\n def ensure_url(self, url: str) -> str:\n \"\"\"Ensures the given string is a valid URL.\n\n Args:\n url: The URL string to validate and normalize\n\n Returns:\n str: The normalized URL\n\n Raises:\n ValueError: If the URL is invalid\n \"\"\"\n url = url.strip()\n if not url.startswith((\"http://\", \"https://\")):\n url = \"https://\" + url\n\n if not self.validate_url(url):\n msg = f\"Invalid URL: {url}\"\n raise ValueError(msg)\n\n return url\n\n def _create_loader(self, url: str) -> RecursiveUrlLoader:\n \"\"\"Creates a RecursiveUrlLoader instance with the configured settings.\n\n Args:\n url: The URL to load\n\n Returns:\n RecursiveUrlLoader: Configured loader instance\n \"\"\"\n headers_dict = {header[\"key\"]: header[\"value\"] for header in self.headers}\n extractor = (lambda x: x) if self.format == \"HTML\" else (lambda x: BeautifulSoup(x, \"lxml\").get_text())\n\n return RecursiveUrlLoader(\n url=url,\n max_depth=self.max_depth,\n prevent_outside=self.prevent_outside,\n use_async=self.use_async,\n extractor=extractor,\n timeout=self.timeout,\n headers=headers_dict,\n check_response_status=self.check_response_status,\n continue_on_failure=self.continue_on_failure,\n base_url=url, # Add base_url to ensure consistent domain crawling\n autoset_encoding=self.autoset_encoding, # Enable automatic encoding detection\n exclude_dirs=[], # 
Allow customization of excluded directories\n link_regex=None, # Allow customization of link filtering\n )\n\n def fetch_url_contents(self) -> list[dict]:\n \"\"\"Load documents from the configured URLs.\n\n Returns:\n List[Data]: List of Data objects containing the fetched content\n\n Raises:\n ValueError: If no valid URLs are provided or if there's an error loading documents\n \"\"\"\n try:\n urls = list({self.ensure_url(url) for url in self.urls if url.strip()})\n logger.debug(f\"URLs: {urls}\")\n if not urls:\n msg = \"No valid URLs provided.\"\n raise ValueError(msg)\n\n all_docs = []\n for url in urls:\n logger.debug(f\"Loading documents from {url}\")\n\n try:\n loader = self._create_loader(url)\n docs = loader.load()\n\n if not docs:\n logger.warning(f\"No documents found for {url}\")\n continue\n\n logger.debug(f\"Found {len(docs)} documents from {url}\")\n all_docs.extend(docs)\n\n except requests.exceptions.RequestException as e:\n logger.exception(f\"Error loading documents from {url}: {e}\")\n continue\n\n if not all_docs:\n msg = \"No documents were successfully loaded from any URL\"\n raise ValueError(msg)\n\n # data = [Data(text=doc.page_content, **doc.metadata) for doc in all_docs]\n data = [\n {\n \"text\": safe_convert(doc.page_content, clean_data=True),\n \"url\": doc.metadata.get(\"source\", \"\"),\n \"title\": doc.metadata.get(\"title\", \"\"),\n \"description\": doc.metadata.get(\"description\", \"\"),\n \"content_type\": doc.metadata.get(\"content_type\", \"\"),\n \"language\": doc.metadata.get(\"language\", \"\"),\n }\n for doc in all_docs\n ]\n except Exception as e:\n error_msg = e.message if hasattr(e, \"message\") else e\n msg = f\"Error loading documents: {error_msg!s}\"\n logger.exception(msg)\n raise ValueError(msg) from e\n return data\n\n def fetch_content(self) -> DataFrame:\n \"\"\"Convert the documents to a DataFrame.\"\"\"\n return DataFrame(data=self.fetch_url_contents())\n\n def fetch_content_as_message(self) -> Message:\n \"\"\"Convert the documents to a Message.\"\"\"\n url_contents = self.fetch_url_contents()\n return Message(text=\"\\n\\n\".join([x[\"text\"] for x in url_contents]), data={\"data\": url_contents})\n"
433
+ },
434
+ "continue_on_failure": {
435
+ "_input_type": "BoolInput",
436
+ "advanced": true,
437
+ "display_name": "Continue on Failure",
438
+ "dynamic": false,
439
+ "info": "If enabled, continues crawling even if some requests fail.",
440
+ "list": false,
441
+ "list_add_label": "Add More",
442
+ "name": "continue_on_failure",
443
+ "placeholder": "",
444
+ "required": false,
445
+ "show": true,
446
+ "title_case": false,
447
+ "tool_mode": false,
448
+ "trace_as_metadata": true,
449
+ "type": "bool",
450
+ "value": true
451
+ },
452
+ "filter_text_html": {
453
+ "_input_type": "BoolInput",
454
+ "advanced": true,
455
+ "display_name": "Filter Text/HTML",
456
+ "dynamic": false,
457
+ "info": "If enabled, filters out text/css content type from the results.",
458
+ "list": false,
459
+ "list_add_label": "Add More",
460
+ "name": "filter_text_html",
461
+ "placeholder": "",
462
+ "required": false,
463
+ "show": true,
464
+ "title_case": false,
465
+ "tool_mode": false,
466
+ "trace_as_metadata": true,
467
+ "type": "bool",
468
+ "value": true
469
+ },
470
+ "format": {
471
+ "_input_type": "DropdownInput",
472
+ "advanced": true,
473
+ "combobox": false,
474
+ "dialog_inputs": {},
475
+ "display_name": "Output Format",
476
+ "dynamic": false,
477
+ "info": "Output Format. Use 'Text' to extract the text from the HTML or 'HTML' for the raw HTML content.",
478
+ "name": "format",
479
+ "options": [
480
+ "Text",
481
+ "HTML"
482
+ ],
483
+ "options_metadata": [],
484
+ "placeholder": "",
485
+ "required": false,
486
+ "show": true,
487
+ "title_case": false,
488
+ "toggle": false,
489
+ "tool_mode": false,
490
+ "trace_as_metadata": true,
491
+ "type": "str",
492
+ "value": "Text"
493
+ },
494
+ "headers": {
495
+ "_input_type": "TableInput",
496
+ "advanced": true,
497
+ "display_name": "Headers",
498
+ "dynamic": false,
499
+ "info": "The headers to send with the request",
500
+ "input_types": [
501
+ "DataFrame"
502
+ ],
503
+ "is_list": true,
504
+ "list_add_label": "Add More",
505
+ "name": "headers",
506
+ "placeholder": "",
507
+ "required": false,
508
+ "show": true,
509
+ "table_icon": "Table",
510
+ "table_schema": {
511
+ "columns": [
512
+ {
513
+ "default": "None",
514
+ "description": "Header name",
515
+ "disable_edit": false,
516
+ "display_name": "Header",
517
+ "edit_mode": "popover",
518
+ "filterable": true,
519
+ "formatter": "text",
520
+ "hidden": false,
521
+ "name": "key",
522
+ "sortable": true,
523
+ "type": "str"
524
+ },
525
+ {
526
+ "default": "None",
527
+ "description": "Header value",
528
+ "disable_edit": false,
529
+ "display_name": "Value",
530
+ "edit_mode": "popover",
531
+ "filterable": true,
532
+ "formatter": "text",
533
+ "hidden": false,
534
+ "name": "value",
535
+ "sortable": true,
536
+ "type": "str"
537
+ }
538
+ ]
539
+ },
540
+ "title_case": false,
541
+ "tool_mode": false,
542
+ "trace_as_metadata": true,
543
+ "trigger_icon": "Table",
544
+ "trigger_text": "Open table",
545
+ "type": "table",
546
+ "value": [
547
+ {
548
+ "key": "User-Agent",
549
+ "value": "langflow"
550
+ }
551
+ ]
552
+ },
553
+ "max_depth": {
554
+ "_input_type": "SliderInput",
555
+ "advanced": false,
556
+ "display_name": "Depth",
557
+ "dynamic": false,
558
+ "info": "Controls how many 'clicks' away from the initial page the crawler will go:\n- depth 1: only the initial page\n- depth 2: initial page + all pages linked directly from it\n- depth 3: initial page + direct links + links found on those direct link pages\nNote: This is about link traversal, not URL path depth.",
559
+ "max_label": " ",
560
+ "max_label_icon": "None",
561
+ "min_label": " ",
562
+ "min_label_icon": "None",
563
+ "name": "max_depth",
564
+ "placeholder": "",
565
+ "range_spec": {
566
+ "max": 5,
567
+ "min": 1,
568
+ "step": 1,
569
+ "step_type": "float"
570
+ },
571
+ "required": false,
572
+ "show": true,
573
+ "slider_buttons": false,
574
+ "slider_buttons_options": [],
575
+ "slider_input": false,
576
+ "title_case": false,
577
+ "tool_mode": false,
578
+ "type": "slider",
579
+ "value": 2
580
+ },
581
+ "prevent_outside": {
582
+ "_input_type": "BoolInput",
583
+ "advanced": true,
584
+ "display_name": "Prevent Outside",
585
+ "dynamic": false,
586
+ "info": "If enabled, only crawls URLs within the same domain as the root URL. This helps prevent the crawler from going to external websites.",
587
+ "list": false,
588
+ "list_add_label": "Add More",
589
+ "name": "prevent_outside",
590
+ "placeholder": "",
591
+ "required": false,
592
+ "show": true,
593
+ "title_case": false,
594
+ "tool_mode": false,
595
+ "trace_as_metadata": true,
596
+ "type": "bool",
597
+ "value": true
598
+ },
599
+ "timeout": {
600
+ "_input_type": "IntInput",
601
+ "advanced": true,
602
+ "display_name": "Timeout",
603
+ "dynamic": false,
604
+ "info": "Timeout for the request in seconds.",
605
+ "list": false,
606
+ "list_add_label": "Add More",
607
+ "name": "timeout",
608
+ "placeholder": "",
609
+ "required": false,
610
+ "show": true,
611
+ "title_case": false,
612
+ "tool_mode": false,
613
+ "trace_as_metadata": true,
614
+ "type": "int",
615
+ "value": 30
616
+ },
617
+ "urls": {
618
+ "_input_type": "MessageTextInput",
619
+ "advanced": false,
620
+ "display_name": "URLs",
621
+ "dynamic": false,
622
+ "info": "Enter one or more URLs to crawl recursively, by clicking the '+' button.",
623
+ "input_types": [],
624
+ "list": true,
625
+ "list_add_label": "Add URL",
626
+ "load_from_db": false,
627
+ "name": "urls",
628
+ "placeholder": "Enter a URL...",
629
+ "required": false,
630
+ "show": true,
631
+ "title_case": false,
632
+ "tool_mode": true,
633
+ "trace_as_input": true,
634
+ "trace_as_metadata": true,
635
+ "type": "str",
636
+ "value": [
637
+ "https://langflow.org"
638
+ ]
639
+ },
640
+ "use_async": {
641
+ "_input_type": "BoolInput",
642
+ "advanced": true,
643
+ "display_name": "Use Async",
644
+ "dynamic": false,
645
+ "info": "If enabled, uses asynchronous loading which can be significantly faster but might use more system resources.",
646
+ "list": false,
647
+ "list_add_label": "Add More",
648
+ "name": "use_async",
649
+ "placeholder": "",
650
+ "required": false,
651
+ "show": true,
652
+ "title_case": false,
653
+ "tool_mode": false,
654
+ "trace_as_metadata": true,
655
+ "type": "bool",
656
+ "value": true
657
+ }
658
+ },
659
+ "tool_mode": false
660
+ },
661
+ "selected_output": "page_results",
662
+ "showNode": true,
663
+ "type": "URLComponent"
664
+ },
665
+ "dragging": false,
666
+ "id": "URLComponent-6JEUC",
667
+ "measured": {
668
+ "height": 292,
669
+ "width": 320
670
+ },
671
+ "position": {
672
+ "x": 238.30016557701828,
673
+ "y": 132.82375729958179
674
+ },
675
+ "selected": false,
676
+ "type": "genericNode"
677
+ },
678
+ {
679
+ "data": {
680
+ "id": "KBIngestion-jj5iW",
681
+ "node": {
682
+ "base_classes": [
683
+ "Data"
684
+ ],
685
+ "beta": false,
686
+ "conditional_paths": [],
687
+ "custom_fields": {},
688
+ "description": "Create or update knowledge in Langflow.",
689
+ "display_name": "Knowledge Ingestion",
690
+ "documentation": "",
691
+ "edited": false,
692
+ "field_order": [
693
+ "knowledge_base",
694
+ "input_df",
695
+ "column_config",
696
+ "chunk_size",
697
+ "api_key",
698
+ "allow_duplicates"
699
+ ],
700
+ "frozen": false,
701
+ "icon": "database",
702
+ "last_updated": "2025-08-13T19:45:49.122Z",
703
+ "legacy": false,
704
+ "metadata": {
705
+ "code_hash": "11df19de541d",
706
+ "module": "langflow.components.data.kb_ingest.KBIngestionComponent"
707
+ },
708
+ "minimized": false,
709
+ "output_types": [],
710
+ "outputs": [
711
+ {
712
+ "allows_loop": false,
713
+ "cache": true,
714
+ "display_name": "DataFrame",
715
+ "group_outputs": false,
716
+ "method": "build_kb_info",
717
+ "name": "dataframe",
718
+ "selected": "Data",
719
+ "tool_mode": true,
720
+ "types": [
721
+ "Data"
722
+ ],
723
+ "value": "__UNDEFINED__"
724
+ }
725
+ ],
726
+ "pinned": false,
727
+ "template": {
728
+ "_type": "Component",
729
+ "allow_duplicates": {
730
+ "_input_type": "BoolInput",
731
+ "advanced": true,
732
+ "display_name": "Allow Duplicates",
733
+ "dynamic": false,
734
+ "info": "Allow duplicate rows in the knowledge base",
735
+ "list": false,
736
+ "list_add_label": "Add More",
737
+ "name": "allow_duplicates",
738
+ "placeholder": "",
739
+ "required": false,
740
+ "show": true,
741
+ "title_case": false,
742
+ "tool_mode": false,
743
+ "trace_as_metadata": true,
744
+ "type": "bool",
745
+ "value": false
746
+ },
747
+ "api_key": {
748
+ "_input_type": "SecretStrInput",
749
+ "advanced": true,
750
+ "display_name": "Embedding Provider API Key",
751
+ "dynamic": false,
752
+ "info": "API key for the embedding provider to generate embeddings.",
753
+ "input_types": [],
754
+ "load_from_db": false,
755
+ "name": "api_key",
756
+ "password": true,
757
+ "placeholder": "",
758
+ "required": false,
759
+ "show": true,
760
+ "title_case": false,
761
+ "type": "str",
762
+ "value": ""
763
+ },
764
+ "chunk_size": {
765
+ "_input_type": "IntInput",
766
+ "advanced": true,
767
+ "display_name": "Chunk Size",
768
+ "dynamic": false,
769
+ "info": "Batch size for processing embeddings",
770
+ "list": false,
771
+ "list_add_label": "Add More",
772
+ "name": "chunk_size",
773
+ "placeholder": "",
774
+ "required": false,
775
+ "show": true,
776
+ "title_case": false,
777
+ "tool_mode": false,
778
+ "trace_as_metadata": true,
779
+ "type": "int",
780
+ "value": 1000
781
+ },
782
+ "code": {
783
+ "advanced": true,
784
+ "dynamic": true,
785
+ "fileTypes": [],
786
+ "file_path": "",
787
+ "info": "",
788
+ "list": false,
789
+ "load_from_db": false,
790
+ "multiline": true,
791
+ "name": "code",
792
+ "password": false,
793
+ "placeholder": "",
794
+ "required": true,
795
+ "show": true,
796
+ "title_case": false,
797
+ "type": "code",
798
+ "value": "from __future__ import annotations\n\nimport hashlib\nimport json\nimport re\nimport uuid\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime, timezone\nfrom pathlib import Path\nfrom typing import Any\n\nimport pandas as pd\nfrom cryptography.fernet import InvalidToken\nfrom langchain_chroma import Chroma\nfrom loguru import logger\n\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DataFrameInput, DropdownInput, IntInput, Output, SecretStrInput, StrInput, TableInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dotdict import dotdict # noqa: TC001\nfrom langflow.schema.table import EditMode\nfrom langflow.services.auth.utils import decrypt_api_key, encrypt_api_key\nfrom langflow.services.deps import get_settings_service\n\nHUGGINGFACE_MODEL_NAMES = [\"sentence-transformers/all-MiniLM-L6-v2\", \"sentence-transformers/all-mpnet-base-v2\"]\nCOHERE_MODEL_NAMES = [\"embed-english-v3.0\", \"embed-multilingual-v3.0\"]\n\nsettings = get_settings_service().settings\nknowledge_directory = settings.knowledge_bases_dir\nif not knowledge_directory:\n msg = \"Knowledge bases directory is not set in the settings.\"\n raise ValueError(msg)\nKNOWLEDGE_BASES_ROOT_PATH = Path(knowledge_directory).expanduser()\n\n\nclass KBIngestionComponent(Component):\n \"\"\"Create or append to Langflow Knowledge from a DataFrame.\"\"\"\n\n # ------ UI metadata ---------------------------------------------------\n display_name = \"Knowledge Ingestion\"\n description = \"Create or update knowledge in Langflow.\"\n icon = \"database\"\n name = \"KBIngestion\"\n\n @dataclass\n class NewKnowledgeBaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"name\": \"create_knowledge_base\",\n \"description\": \"Create new knowledge in Langflow.\",\n \"display_name\": \"Create new knowledge\",\n \"field_order\": [\"01_new_kb_name\", \"02_embedding_model\", \"03_api_key\"],\n \"template\": {\n \"01_new_kb_name\": StrInput(\n name=\"new_kb_name\",\n display_name=\"Knowledge Name\",\n info=\"Name of the new knowledge to create.\",\n required=True,\n ),\n \"02_embedding_model\": DropdownInput(\n name=\"embedding_model\",\n display_name=\"Model Name\",\n info=\"Select the embedding model to use for this knowledge base.\",\n required=True,\n options=OPENAI_EMBEDDING_MODEL_NAMES + HUGGINGFACE_MODEL_NAMES + COHERE_MODEL_NAMES,\n options_metadata=[{\"icon\": \"OpenAI\"} for _ in OPENAI_EMBEDDING_MODEL_NAMES]\n + [{\"icon\": \"HuggingFace\"} for _ in HUGGINGFACE_MODEL_NAMES]\n + [{\"icon\": \"Cohere\"} for _ in COHERE_MODEL_NAMES],\n ),\n \"03_api_key\": SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"Provider API key for embedding model\",\n required=True,\n load_from_db=True,\n ),\n },\n },\n }\n }\n )\n\n # ------ Inputs --------------------------------------------------------\n inputs = [\n DropdownInput(\n name=\"knowledge_base\",\n display_name=\"Knowledge\",\n info=\"Select the knowledge to load data from.\",\n required=True,\n options=[\n str(d.name) for d in KNOWLEDGE_BASES_ROOT_PATH.iterdir() if not d.name.startswith(\".\") and d.is_dir()\n ]\n if KNOWLEDGE_BASES_ROOT_PATH.exists()\n else [],\n refresh_button=True,\n dialog_inputs=asdict(NewKnowledgeBaseInput()),\n ),\n DataFrameInput(\n name=\"input_df\",\n display_name=\"Data\",\n info=\"Table with all original 
columns (already chunked / processed).\",\n required=True,\n ),\n TableInput(\n name=\"column_config\",\n display_name=\"Column Configuration\",\n info=\"Configure column behavior for the knowledge base.\",\n required=True,\n table_schema=[\n {\n \"name\": \"column_name\",\n \"display_name\": \"Column Name\",\n \"type\": \"str\",\n \"description\": \"Name of the column in the source DataFrame\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"vectorize\",\n \"display_name\": \"Vectorize\",\n \"type\": \"boolean\",\n \"description\": \"Create embeddings for this column\",\n \"default\": False,\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"identifier\",\n \"display_name\": \"Identifier\",\n \"type\": \"boolean\",\n \"description\": \"Use this column as unique identifier\",\n \"default\": False,\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n value=[\n {\n \"column_name\": \"text\",\n \"vectorize\": True,\n \"identifier\": False,\n }\n ],\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=\"Batch size for processing embeddings\",\n advanced=True,\n value=1000,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Embedding Provider API Key\",\n info=\"API key for the embedding provider to generate embeddings.\",\n advanced=True,\n required=False,\n ),\n BoolInput(\n name=\"allow_duplicates\",\n display_name=\"Allow Duplicates\",\n info=\"Allow duplicate rows in the knowledge base\",\n advanced=True,\n value=False,\n ),\n ]\n\n # ------ Outputs -------------------------------------------------------\n outputs = [Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"build_kb_info\")]\n\n # ------ Internal helpers ---------------------------------------------\n def _get_kb_root(self) -> Path:\n \"\"\"Return the root directory for knowledge bases.\"\"\"\n return KNOWLEDGE_BASES_ROOT_PATH\n\n def _validate_column_config(self, df_source: pd.DataFrame) -> list[dict[str, Any]]:\n \"\"\"Validate column configuration using Structured Output patterns.\"\"\"\n if not self.column_config:\n msg = \"Column configuration cannot be empty\"\n raise ValueError(msg)\n\n # Convert table input to list of dicts (similar to Structured Output)\n config_list = self.column_config if isinstance(self.column_config, list) else []\n\n # Validate column names exist in DataFrame\n df_columns = set(df_source.columns)\n for config in config_list:\n col_name = config.get(\"column_name\")\n if col_name not in df_columns and not self.silent_errors:\n msg = f\"Column '{col_name}' not found in DataFrame. 
Available columns: {sorted(df_columns)}\"\n self.log(f\"Warning: {msg}\")\n raise ValueError(msg)\n\n return config_list\n\n def _get_embedding_provider(self, embedding_model: str) -> str:\n \"\"\"Get embedding provider by matching model name to lists.\"\"\"\n if embedding_model in OPENAI_EMBEDDING_MODEL_NAMES:\n return \"OpenAI\"\n if embedding_model in HUGGINGFACE_MODEL_NAMES:\n return \"HuggingFace\"\n if embedding_model in COHERE_MODEL_NAMES:\n return \"Cohere\"\n return \"Custom\"\n\n def _build_embeddings(self, embedding_model: str, api_key: str):\n \"\"\"Build embedding model using provider patterns.\"\"\"\n # Get provider by matching model name to lists\n provider = self._get_embedding_provider(embedding_model)\n\n # Validate provider and model\n if provider == \"OpenAI\":\n from langchain_openai import OpenAIEmbeddings\n\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=embedding_model,\n api_key=api_key,\n chunk_size=self.chunk_size,\n )\n if provider == \"HuggingFace\":\n from langchain_huggingface import HuggingFaceEmbeddings\n\n return HuggingFaceEmbeddings(\n model=embedding_model,\n )\n if provider == \"Cohere\":\n from langchain_cohere import CohereEmbeddings\n\n if not api_key:\n msg = \"Cohere API key is required when using Cohere provider\"\n raise ValueError(msg)\n return CohereEmbeddings(\n model=embedding_model,\n cohere_api_key=api_key,\n )\n if provider == \"Custom\":\n # For custom embedding models, we would need additional configuration\n msg = \"Custom embedding models not yet supported\"\n raise NotImplementedError(msg)\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def _build_embedding_metadata(self, embedding_model, api_key) -> dict[str, Any]:\n \"\"\"Build embedding model metadata.\"\"\"\n # Get provider by matching model name to lists\n embedding_provider = self._get_embedding_provider(embedding_model)\n\n api_key_to_save = None\n if api_key and hasattr(api_key, \"get_secret_value\"):\n api_key_to_save = api_key.get_secret_value()\n elif isinstance(api_key, str):\n api_key_to_save = api_key\n\n encrypted_api_key = None\n if api_key_to_save:\n settings_service = get_settings_service()\n try:\n encrypted_api_key = encrypt_api_key(api_key_to_save, settings_service=settings_service)\n except (TypeError, ValueError) as e:\n self.log(f\"Could not encrypt API key: {e}\")\n logger.error(f\"Could not encrypt API key: {e}\")\n\n return {\n \"embedding_provider\": embedding_provider,\n \"embedding_model\": embedding_model,\n \"api_key\": encrypted_api_key,\n \"api_key_used\": bool(api_key),\n \"chunk_size\": self.chunk_size,\n \"created_at\": datetime.now(timezone.utc).isoformat(),\n }\n\n def _save_embedding_metadata(self, kb_path: Path, embedding_model: str, api_key: str) -> None:\n \"\"\"Save embedding model metadata.\"\"\"\n embedding_metadata = self._build_embedding_metadata(embedding_model, api_key)\n metadata_path = kb_path / \"embedding_metadata.json\"\n metadata_path.write_text(json.dumps(embedding_metadata, indent=2))\n\n def _save_kb_files(\n self,\n kb_path: Path,\n config_list: list[dict[str, Any]],\n ) -> None:\n \"\"\"Save KB files using File Component storage patterns.\"\"\"\n try:\n # Create directory (following File Component patterns)\n kb_path.mkdir(parents=True, exist_ok=True)\n\n # Save column configuration\n # Only do this if the file doesn't exist already\n cfg_path = kb_path / \"schema.json\"\n if not cfg_path.exists():\n 
cfg_path.write_text(json.dumps(config_list, indent=2))\n\n except Exception as e:\n if not self.silent_errors:\n raise\n self.log(f\"Error saving KB files: {e}\")\n\n def _build_column_metadata(self, config_list: list[dict[str, Any]], df_source: pd.DataFrame) -> dict[str, Any]:\n \"\"\"Build detailed column metadata.\"\"\"\n metadata: dict[str, Any] = {\n \"total_columns\": len(df_source.columns),\n \"mapped_columns\": len(config_list),\n \"unmapped_columns\": len(df_source.columns) - len(config_list),\n \"columns\": [],\n \"summary\": {\"vectorized_columns\": [], \"identifier_columns\": []},\n }\n\n for config in config_list:\n col_name = config.get(\"column_name\")\n vectorize = config.get(\"vectorize\") == \"True\" or config.get(\"vectorize\") is True\n identifier = config.get(\"identifier\") == \"True\" or config.get(\"identifier\") is True\n\n # Add to columns list\n metadata[\"columns\"].append(\n {\n \"name\": col_name,\n \"vectorize\": vectorize,\n \"identifier\": identifier,\n }\n )\n\n # Update summary\n if vectorize:\n metadata[\"summary\"][\"vectorized_columns\"].append(col_name)\n if identifier:\n metadata[\"summary\"][\"identifier_columns\"].append(col_name)\n\n return metadata\n\n def _create_vector_store(\n self, df_source: pd.DataFrame, config_list: list[dict[str, Any]], embedding_model: str, api_key: str\n ) -> None:\n \"\"\"Create vector store following Local DB component pattern.\"\"\"\n try:\n # Set up vector store directory\n base_dir = self._get_kb_root()\n\n vector_store_dir = base_dir / self.knowledge_base\n vector_store_dir.mkdir(parents=True, exist_ok=True)\n\n # Create embeddings model\n embedding_function = self._build_embeddings(embedding_model, api_key)\n\n # Convert DataFrame to Data objects (following Local DB pattern)\n data_objects = self._convert_df_to_data_objects(df_source, config_list)\n\n # Create vector store\n chroma = Chroma(\n persist_directory=str(vector_store_dir),\n embedding_function=embedding_function,\n collection_name=self.knowledge_base,\n )\n\n # Convert Data objects to LangChain Documents\n documents = []\n for data_obj in data_objects:\n doc = data_obj.to_lc_document()\n documents.append(doc)\n\n # Add documents to vector store\n if documents:\n chroma.add_documents(documents)\n self.log(f\"Added {len(documents)} documents to vector store '{self.knowledge_base}'\")\n\n except Exception as e:\n if not self.silent_errors:\n raise\n self.log(f\"Error creating vector store: {e}\")\n\n def _convert_df_to_data_objects(self, df_source: pd.DataFrame, config_list: list[dict[str, Any]]) -> list[Data]:\n \"\"\"Convert DataFrame to Data objects for vector store.\"\"\"\n data_objects: list[Data] = []\n\n # Set up vector store directory\n base_dir = self._get_kb_root()\n\n # If we don't allow duplicates, we need to get the existing hashes\n chroma = Chroma(\n persist_directory=str(base_dir / self.knowledge_base),\n collection_name=self.knowledge_base,\n )\n\n # Get all documents and their metadata\n all_docs = chroma.get()\n\n # Extract all _id values from metadata\n id_list = [metadata.get(\"_id\") for metadata in all_docs[\"metadatas\"] if metadata.get(\"_id\")]\n\n # Get column roles\n content_cols = []\n identifier_cols = []\n\n for config in config_list:\n col_name = config.get(\"column_name\")\n vectorize = config.get(\"vectorize\") == \"True\" or config.get(\"vectorize\") is True\n identifier = config.get(\"identifier\") == \"True\" or config.get(\"identifier\") is True\n\n if vectorize:\n content_cols.append(col_name)\n elif identifier:\n 
    def _convert_df_to_data_objects(self, df_source: pd.DataFrame, config_list: list[dict[str, Any]]) -> list[Data]:
        """Convert DataFrame to Data objects for vector store."""
        data_objects: list[Data] = []

        # Set up vector store directory
        base_dir = self._get_kb_root()

        # If we don't allow duplicates, we need to get the existing hashes
        chroma = Chroma(
            persist_directory=str(base_dir / self.knowledge_base),
            collection_name=self.knowledge_base,
        )

        # Get all documents and their metadata
        all_docs = chroma.get()

        # Extract all _id values from metadata
        id_list = [metadata.get("_id") for metadata in all_docs["metadatas"] if metadata.get("_id")]

        # Get column roles
        content_cols = []
        identifier_cols = []

        for config in config_list:
            col_name = config.get("column_name")
            vectorize = config.get("vectorize") == "True" or config.get("vectorize") is True
            identifier = config.get("identifier") == "True" or config.get("identifier") is True

            if vectorize:
                content_cols.append(col_name)
            elif identifier:
                identifier_cols.append(col_name)

        # Convert each row to a Data object
        for _, row in df_source.iterrows():
            # Build content text from vectorized columns using list comprehension
            content_parts = [str(row[col]) for col in content_cols if col in row and pd.notna(row[col])]

            page_content = " ".join(content_parts)

            # Build metadata from NON-vectorized columns only (simple key-value pairs)
            data_dict = {
                "text": page_content,  # Main content for vectorization
            }

            # Add metadata columns as simple key-value pairs
            for col in df_source.columns:
                if col not in content_cols and col in row and pd.notna(row[col]):
                    # Convert to simple types for Chroma metadata
                    value = row[col]
                    data_dict[col] = str(value)  # Convert complex types to string

            # Hash the page_content for unique ID
            page_content_hash = hashlib.sha256(page_content.encode()).hexdigest()
            data_dict["_id"] = page_content_hash

            # If duplicates are disallowed, and hash exists, prevent adding this row
            if not self.allow_duplicates and page_content_hash in id_list:
                self.log(f"Skipping duplicate row with hash {page_content_hash}")
                continue

            # Create Data object - everything except "text" becomes metadata
            data_obj = Data(data=data_dict)
            data_objects.append(data_obj)

        return data_objects

    def is_valid_collection_name(self, name, min_length: int = 3, max_length: int = 63) -> bool:
        """Validates collection name against conditions 1-3.

        1. Contains 3-63 characters
        2. Starts and ends with alphanumeric character
        3. Contains only alphanumeric characters, underscores, or hyphens.

        Args:
            name (str): Collection name to validate
            min_length (int): Minimum length of the name
            max_length (int): Maximum length of the name

        Returns:
            bool: True if valid, False otherwise
        """
        # Check length (condition 1)
        if not (min_length <= len(name) <= max_length):
            return False

        # Check start/end with alphanumeric (condition 2)
        if not (name[0].isalnum() and name[-1].isalnum()):
            return False

        # Check allowed characters (condition 3)
        return re.match(r"^[a-zA-Z0-9_-]+$", name) is not None
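Two row-level mechanics above are easy to check in isolation: deduplication keys on a SHA-256 hash of the joined page content, and collection names follow Chroma-style naming rules. A standalone sketch with illustrative values, restating the same logic outside the component:

# Standalone sketch of the content-hash dedup and name-validation logic above.
import hashlib
import re

def content_id(page_content: str) -> str:
    # Same scheme as _convert_df_to_data_objects: hash of the joined text
    return hashlib.sha256(page_content.encode()).hexdigest()

seen = {content_id("hello world")}
assert content_id("hello world") in seen       # identical row would be skipped
assert content_id("hello  world") not in seen  # any whitespace change alters the hash

def valid_name(name: str) -> bool:
    # Mirrors is_valid_collection_name with its default 3-63 length bounds
    return (3 <= len(name) <= 63
            and name[0].isalnum() and name[-1].isalnum()
            and re.match(r"^[a-zA-Z0-9_-]+$", name) is not None)

assert valid_name("my_kb-01")
assert not valid_name("-bad")  # must start with an alphanumeric character
assert not valid_name("ab")    # too short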
Error: {e}\")\n\n # Check if a custom API key was provided, update metadata if so\n if self.api_key:\n api_key = self.api_key\n self._save_embedding_metadata(\n kb_path=kb_path,\n embedding_model=embedding_model,\n api_key=api_key,\n )\n\n # Create vector store following Local DB component pattern\n self._create_vector_store(df_source, config_list, embedding_model=embedding_model, api_key=api_key)\n\n # Save KB files (using File Component storage patterns)\n self._save_kb_files(kb_path, config_list)\n\n # Build metadata response\n meta: dict[str, Any] = {\n \"kb_id\": str(uuid.uuid4()),\n \"kb_name\": self.knowledge_base,\n \"rows\": len(df_source),\n \"column_metadata\": column_metadata,\n \"path\": str(kb_path),\n \"config_columns\": len(config_list),\n \"timestamp\": datetime.now(tz=timezone.utc).isoformat(),\n }\n\n # Set status message\n self.status = f\"✅ KB **{self.knowledge_base}** saved · {len(df_source)} chunks.\"\n\n return Data(data=meta)\n\n except Exception as e:\n if not self.silent_errors:\n raise\n self.log(f\"Error in KB ingestion: {e}\")\n self.status = f\"❌ KB ingestion failed: {e}\"\n return Data(data={\"error\": str(e), \"kb_name\": self.knowledge_base})\n\n def _get_knowledge_bases(self) -> list[str]:\n \"\"\"Retrieve a list of available knowledge bases.\n\n Returns:\n A list of knowledge base names.\n \"\"\"\n # Return the list of directories in the knowledge base root path\n kb_root_path = self._get_kb_root()\n\n if not kb_root_path.exists():\n return []\n\n return [str(d.name) for d in kb_root_path.iterdir() if not d.name.startswith(\".\") and d.is_dir()]\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n \"\"\"Update build configuration based on provider selection.\"\"\"\n # Create a new knowledge base\n if field_name == \"knowledge_base\":\n if isinstance(field_value, dict) and \"01_new_kb_name\" in field_value:\n # Validate the knowledge base name - Make sure it follows these rules:\n if not self.is_valid_collection_name(field_value[\"01_new_kb_name\"]):\n msg = f\"Invalid knowledge base name: {field_value['01_new_kb_name']}\"\n raise ValueError(msg)\n\n # We need to test the API Key one time against the embedding model\n embed_model = self._build_embeddings(\n embedding_model=field_value[\"02_embedding_model\"], api_key=field_value[\"03_api_key\"]\n )\n\n # Try to generate a dummy embedding to validate the API key\n embed_model.embed_query(\"test\")\n\n # Create the new knowledge base directory\n kb_path = KNOWLEDGE_BASES_ROOT_PATH / field_value[\"01_new_kb_name\"]\n kb_path.mkdir(parents=True, exist_ok=True)\n\n # Save the embedding metadata\n build_config[\"knowledge_base\"][\"value\"] = field_value[\"01_new_kb_name\"]\n self._save_embedding_metadata(\n kb_path=kb_path,\n embedding_model=field_value[\"02_embedding_model\"],\n api_key=field_value[\"03_api_key\"],\n )\n\n # Update the knowledge base options dynamically\n build_config[\"knowledge_base\"][\"options\"] = self._get_knowledge_bases()\n if build_config[\"knowledge_base\"][\"value\"] not in build_config[\"knowledge_base\"][\"options\"]:\n build_config[\"knowledge_base\"][\"value\"] = None\n\n return build_config\n"
+ },
+ "column_config": {
+ "_input_type": "TableInput",
+ "advanced": false,
+ "display_name": "Column Configuration",
+ "dynamic": false,
+ "info": "Configure column behavior for the knowledge base.",
+ "is_list": true,
+ "list_add_label": "Add More",
+ "name": "column_config",
+ "placeholder": "",
+ "required": true,
+ "show": true,
+ "table_icon": "Table",
+ "table_schema": {
+ "columns": [
+ {
+ "default": "None",
+ "description": "Name of the column in the source DataFrame",
+ "disable_edit": false,
+ "display_name": "Column Name",
+ "edit_mode": "inline",
+ "filterable": true,
+ "formatter": "text",
+ "hidden": false,
+ "name": "column_name",
+ "sortable": true,
+ "type": "str"
+ },
+ {
+ "default": false,
+ "description": "Create embeddings for this column",
+ "disable_edit": false,
+ "display_name": "Vectorize",
+ "edit_mode": "inline",
+ "filterable": true,
+ "formatter": "boolean",
+ "hidden": false,
+ "name": "vectorize",
+ "sortable": true,
+ "type": "boolean"
+ },
+ {
+ "default": false,
+ "description": "Use this column as unique identifier",
+ "disable_edit": false,
+ "display_name": "Identifier",
+ "edit_mode": "inline",
+ "filterable": true,
+ "formatter": "boolean",
+ "hidden": false,
+ "name": "identifier",
+ "sortable": true,
+ "type": "boolean"
+ }
+ ]
+ },
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "trigger_icon": "Table",
+ "trigger_text": "Open table",
+ "type": "table",
+ "value": [
+ {
+ "column_name": "text",
+ "identifier": false,
+ "vectorize": true
+ }
+ ]
+ },
+ "input_df": {
+ "_input_type": "DataFrameInput",
+ "advanced": false,
+ "display_name": "Data",
+ "dynamic": false,
+ "info": "Table with all original columns (already chunked / processed).",
+ "input_types": [
+ "DataFrame"
+ ],
+ "list": false,
+ "list_add_label": "Add More",
+ "name": "input_df",
+ "placeholder": "",
+ "required": true,
+ "show": true,
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "type": "other",
+ "value": ""
+ },
+ "knowledge_base": {
+ "_input_type": "DropdownInput",
+ "advanced": false,
+ "combobox": false,
+ "dialog_inputs": {
+ "fields": {
+ "data": {
+ "node": {
+ "description": "Create new knowledge in Langflow.",
+ "display_name": "Create new knowledge",
+ "field_order": [
+ "01_new_kb_name",
+ "02_embedding_model",
+ "03_api_key"
+ ],
+ "name": "create_knowledge_base",
+ "template": {
+ "01_new_kb_name": {
+ "_input_type": "StrInput",
+ "advanced": false,
+ "display_name": "Knowledge Name",
+ "dynamic": false,
+ "info": "Name of the new knowledge to create.",
+ "list": false,
+ "list_add_label": "Add More",
+ "load_from_db": false,
+ "name": "new_kb_name",
+ "placeholder": "",
+ "required": true,
+ "show": true,
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "type": "str",
+ "value": ""
+ },
+ "02_embedding_model": {
+ "_input_type": "DropdownInput",
+ "advanced": false,
+ "combobox": false,
+ "dialog_inputs": {},
+ "display_name": "Model Name",
+ "dynamic": false,
+ "info": "Select the embedding model to use for this knowledge base.",
+ "name": "embedding_model",
+ "options": [
+ "text-embedding-3-small",
+ "text-embedding-3-large",
+ "text-embedding-ada-002",
+ "sentence-transformers/all-MiniLM-L6-v2",
+ "sentence-transformers/all-mpnet-base-v2",
+ "embed-english-v3.0",
+ "embed-multilingual-v3.0"
+ ],
+ "options_metadata": [
+ {
+ "icon": "OpenAI"
+ },
+ {
+ "icon": "OpenAI"
+ },
+ {
+ "icon": "OpenAI"
+ },
+ {
+ "icon": "HuggingFace"
+ },
+ {
+ "icon": "HuggingFace"
+ },
+ {
+ "icon": "Cohere"
+ },
+ {
+ "icon": "Cohere"
+ }
+ ],
+ "placeholder": "",
+ "required": true,
+ "show": true,
+ "title_case": false,
+ "toggle": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "type": "str",
+ "value": ""
+ },
+ "03_api_key": {
+ "_input_type": "SecretStrInput",
+ "advanced": false,
+ "display_name": "API Key",
+ "dynamic": false,
+ "info": "Provider API key for embedding model",
+ "input_types": [],
+ "load_from_db": true,
+ "name": "api_key",
+ "password": true,
+ "placeholder": "",
+ "required": true,
+ "show": true,
+ "title_case": false,
+ "type": "str",
+ "value": ""
+ }
+ }
+ }
+ }
+ },
+ "functionality": "create"
+ },
+ "display_name": "Knowledge",
+ "dynamic": false,
+ "info": "Select the knowledge to load data from.",
+ "name": "knowledge_base",
+ "options": [],
+ "options_metadata": [],
+ "placeholder": "",
+ "refresh_button": true,
+ "required": true,
+ "show": true,
+ "title_case": false,
+ "toggle": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "type": "str",
+ "value": null
+ }
+ },
+ "tool_mode": false
+ },
+ "showNode": true,
+ "type": "KBIngestion"
+ },
+ "dragging": false,
+ "id": "KBIngestion-jj5iW",
+ "measured": {
+ "height": 333,
+ "width": 320
+ },
+ "position": {
+ "x": 1000.4023842644599,
+ "y": 101.77068666606948
+ },
+ "selected": false,
+ "type": "genericNode"
+ }
+ ],
+ "viewport": {
+ "x": 280.03407172860966,
+ "y": 131.39479654897661,
+ "zoom": 0.9295918751284687
+ }
+ },
+ "description": "An example of creating a Knowledge Base and ingesting data into it from a web URL.",
+ "endpoint_name": null,
+ "id": "dfffa40b-547b-46ae-9c4a-6539851990bf",
+ "is_component": false,
+ "last_tested_version": "1.5.0.post1",
+ "name": "Knowledge Ingestion",
+ "tags": []
+ }
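As a quick way to inspect this starter flow, the sketch below loads the exported JSON and prints the KBIngestion node's default column configuration. The filename is an assumption; the traversal follows the node structure shown in the diff above.

# Minimal sketch (assumed filename) for inspecting the starter flow JSON.
import json
from pathlib import Path

flow_path = Path("Knowledge Ingestion.json")  # assumed name of the exported flow
flow = json.loads(flow_path.read_text())

for node in flow["data"]["nodes"]:
    if node["data"].get("type") == "KBIngestion":
        print(node["data"]["node"]["template"]["column_config"]["value"])
        # expected, per the template above:
        # [{'column_name': 'text', 'identifier': False, 'vectorize': True}]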