lfx-nightly 0.1.12.dev12__py3-none-any.whl → 0.1.12.dev13__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of lfx-nightly might be problematic.

lfx/components/elastic/opensearch.py
@@ -1,243 +1,778 @@
+ from __future__ import annotations
+
  import json
+ import uuid
  from typing import Any

- from langchain_community.vectorstores import OpenSearchVectorSearch
+ from opensearchpy import OpenSearch, helpers

  from lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store
  from lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection
- from lfx.io import (
-     BoolInput,
-     DropdownInput,
-     FloatInput,
-     HandleInput,
-     IntInput,
-     MultilineInput,
-     SecretStrInput,
-     StrInput,
- )
+ from lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput
+ from lfx.log import logger
  from lfx.schema.data import Data


  @vector_store_connection
  class OpenSearchVectorStoreComponent(LCVectorStoreComponent):
-     """OpenSearch Vector Store with advanced, customizable search capabilities."""
+     """OpenSearch Vector Store Component with Hybrid Search Capabilities.
+
+     This component provides vector storage and retrieval using OpenSearch, combining semantic
+     similarity search (KNN) with keyword-based search for optimal results. It supports document
+     ingestion, vector embeddings, and advanced filtering with authentication options.
+
+     Features:
+     - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)
+     - Hybrid search combining KNN vector similarity and keyword matching
+     - Flexible authentication (Basic auth, JWT tokens)
+     - Advanced filtering and aggregations
+     - Metadata injection during document ingestion
+     """

      display_name: str = "OpenSearch"
-     description: str = "OpenSearch Vector Store with advanced, customizable search capabilities."
-     name = "OpenSearch"
-     icon = "OpenSearch"
+     icon: str = "OpenSearch"
+     description: str = (
+         "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities."
+     )
+
+     # Keys we consider baseline
+     default_keys: list[str] = [
+         "opensearch_url",
+         "index_name",
+         *[i.name for i in LCVectorStoreComponent.inputs],  # search_query, add_documents, etc.
+         "embedding",
+         "vector_field",
+         "number_of_results",
+         "auth_mode",
+         "username",
+         "password",
+         "jwt_token",
+         "jwt_header",
+         "bearer_prefix",
+         "use_ssl",
+         "verify_certs",
+         "filter_expression",
+         "engine",
+         "space_type",
+         "ef_construction",
+         "m",
+         "docs_metadata",
+     ]

      inputs = [
+         TableInput(
+             name="docs_metadata",
+             display_name="Document Metadata",
+             info=(
+                 "Additional metadata key-value pairs to be added to all ingested documents. "
+                 "Useful for tagging documents with source information, categories, or other custom attributes."
+             ),
+             table_schema=[
+                 {
+                     "name": "key",
+                     "display_name": "Key",
+                     "type": "str",
+                     "description": "Key name",
+                 },
+                 {
+                     "name": "value",
+                     "display_name": "Value",
+                     "type": "str",
+                     "description": "Value of the metadata",
+                 },
+             ],
+             value=[],
+             advanced=True,
+         ),
          StrInput(
              name="opensearch_url",
              display_name="OpenSearch URL",
              value="http://localhost:9200",
-             info="URL for OpenSearch cluster (e.g. https://192.168.1.1:9200).",
+             info=(
+                 "The connection URL for your OpenSearch cluster "
+                 "(e.g., http://localhost:9200 for local development or your cloud endpoint)."
+             ),
          ),
          StrInput(
              name="index_name",
              display_name="Index Name",
              value="langflow",
-             info="The index name where the vectors will be stored in OpenSearch cluster.",
+             info=(
+                 "The OpenSearch index name where documents will be stored and searched. "
+                 "Will be created automatically if it doesn't exist."
+             ),
          ),
-         *LCVectorStoreComponent.inputs,
-         HandleInput(name="embedding", display_name="Embedding", input_types=["Embeddings"]),
          DropdownInput(
-             name="search_type",
-             display_name="Search Type",
-             options=["similarity", "similarity_score_threshold", "mmr"],
-             value="similarity",
+             name="engine",
+             display_name="Vector Engine",
+             options=["jvector", "nmslib", "faiss", "lucene"],
+             value="jvector",
+             info=(
+                 "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. "
+                 "Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'."
+             ),
+             advanced=True,
+         ),
+         DropdownInput(
+             name="space_type",
+             display_name="Distance Metric",
+             options=["l2", "l1", "cosinesimil", "linf", "innerproduct"],
+             value="l2",
+             info=(
+                 "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, "
+                 "'cosinesimil' for cosine similarity, 'innerproduct' for dot product."
+             ),
              advanced=True,
          ),
          IntInput(
-             name="number_of_results",
-             display_name="Number of Results",
-             info="Number of results to return.",
+             name="ef_construction",
+             display_name="EF Construction",
+             value=512,
+             info=(
+                 "Size of the dynamic candidate list during index construction. "
+                 "Higher values improve recall but increase indexing time and memory usage."
+             ),
+             advanced=True,
+         ),
+         IntInput(
+             name="m",
+             display_name="M Parameter",
+             value=16,
+             info=(
+                 "Number of bidirectional connections for each vector in the HNSW graph. "
+                 "Higher values improve search quality but increase memory usage and indexing time."
+             ),
              advanced=True,
-             value=4,
          ),
-         FloatInput(
-             name="search_score_threshold",
-             display_name="Search Score Threshold",
-             info="Minimum similarity score threshold for search results.",
-             value=0.0,
+         *LCVectorStoreComponent.inputs,  # includes search_query, add_documents, etc.
+         HandleInput(name="embedding", display_name="Embedding", input_types=["Embeddings"]),
+         StrInput(
+             name="vector_field",
+             display_name="Vector Field Name",
+             value="chunk_embedding",
              advanced=True,
+             info="Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.",
+         ),
+         IntInput(
+             name="number_of_results",
+             display_name="Default Result Limit",
+             value=10,
+             advanced=True,
+             info=(
+                 "Default maximum number of search results to return when no limit is "
+                 "specified in the filter expression."
+             ),
+         ),
+         MultilineInput(
+             name="filter_expression",
+             display_name="Search Filters (JSON)",
+             value="",
+             info=(
+                 "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\n"
+                 "Format 1 - Explicit filters:\n"
+                 '{"filter": [{"term": {"filename":"doc.pdf"}}, '
+                 '{"terms":{"owner":["user1","user2"]}}], "limit": 10, "score_threshold": 1.6}\n\n'
+                 "Format 2 - Context-style mapping:\n"
+                 '{"data_sources":["file.pdf"], "document_types":["application/pdf"], "owners":["user123"]}\n\n'
+                 "Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters."
+             ),
+         ),
+         # ----- Auth controls (dynamic) -----
+         DropdownInput(
+             name="auth_mode",
+             display_name="Authentication Mode",
+             value="basic",
+             options=["basic", "jwt"],
+             info=(
+                 "Authentication method: 'basic' for username/password authentication, "
+                 "or 'jwt' for JSON Web Token (Bearer) authentication."
+             ),
+             real_time_refresh=True,
+             advanced=False,
          ),
          StrInput(
              name="username",
              display_name="Username",
              value="admin",
-             advanced=True,
+             show=False,
          ),
          SecretStrInput(
              name="password",
              display_name="Password",
              value="admin",
+             show=False,
+         ),
+         SecretStrInput(
+             name="jwt_token",
+             display_name="JWT Token",
+             value="JWT",
+             load_from_db=True,
+             show=True,
+             info=(
+                 "Valid JSON Web Token for authentication. "
+                 "Will be sent in the Authorization header (with optional 'Bearer ' prefix)."
+             ),
+         ),
+         StrInput(
+             name="jwt_header",
+             display_name="JWT Header Name",
+             value="Authorization",
+             show=False,
              advanced=True,
          ),
+         BoolInput(
+             name="bearer_prefix",
+             display_name="Prefix 'Bearer '",
+             value=True,
+             show=False,
+             advanced=True,
+         ),
+         # ----- TLS -----
          BoolInput(
              name="use_ssl",
-             display_name="Use SSL",
+             display_name="Use SSL/TLS",
              value=True,
              advanced=True,
+             info="Enable SSL/TLS encryption for secure connections to OpenSearch.",
          ),
          BoolInput(
              name="verify_certs",
-             display_name="Verify Certificates",
+             display_name="Verify SSL Certificates",
              value=False,
              advanced=True,
-         ),
-         MultilineInput(
-             name="hybrid_search_query",
-             display_name="Hybrid Search Query",
-             value="",
-             advanced=True,
              info=(
-                 "Provide a custom hybrid search query in JSON format. This allows you to combine "
-                 "vector similarity and keyword matching."
+                 "Verify SSL certificates when connecting. "
+                 "Disable for self-signed certificates in development environments."
              ),
          ),
      ]

-     @check_cached_vector_store
-     def build_vector_store(self) -> OpenSearchVectorSearch:
-         """Builds the OpenSearch Vector Store object."""
-         try:
-             from langchain_community.vectorstores import OpenSearchVectorSearch
-         except ImportError as e:
-             error_message = f"Failed to import required modules: {e}"
-             self.log(error_message)
-             raise ImportError(error_message) from e
+     # ---------- helper functions for index management ----------
+     def _default_text_mapping(
+         self,
+         dim: int,
+         engine: str = "jvector",
+         space_type: str = "l2",
+         ef_search: int = 512,
+         ef_construction: int = 100,
+         m: int = 16,
+         vector_field: str = "vector_field",
+     ) -> dict[str, Any]:
+         """Create the default OpenSearch index mapping for vector search.

-         try:
-             opensearch = OpenSearchVectorSearch(
-                 index_name=self.index_name,
-                 embedding_function=self.embedding,
-                 opensearch_url=self.opensearch_url,
-                 http_auth=(self.username, self.password),
-                 use_ssl=self.use_ssl,
-                 verify_certs=self.verify_certs,
-                 ssl_assert_hostname=False,
-                 ssl_show_warn=False,
-             )
-         except Exception as e:
-             error_message = f"Failed to create OpenSearchVectorSearch instance: {e}"
-             self.log(error_message)
-             raise RuntimeError(error_message) from e
+         This method generates the index configuration with k-NN settings optimized
+         for approximate nearest neighbor search using the specified vector engine.
+
+         Args:
+             dim: Dimensionality of the vector embeddings
+             engine: Vector search engine (jvector, nmslib, faiss, lucene)
+             space_type: Distance metric for similarity calculation
+             ef_search: Size of dynamic list used during search
+             ef_construction: Size of dynamic list used during index construction
+             m: Number of bidirectional links for each vector
+             vector_field: Name of the field storing vector embeddings
+
+         Returns:
+             Dictionary containing OpenSearch index mapping configuration
+         """
+         return {
+             "settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
+             "mappings": {
+                 "properties": {
+                     vector_field: {
+                         "type": "knn_vector",
+                         "dimension": dim,
+                         "method": {
+                             "name": "disk_ann",
+                             "space_type": space_type,
+                             "engine": engine,
+                             "parameters": {"ef_construction": ef_construction, "m": m},
+                         },
+                     }
+                 }
+             },
+         }
+
+     def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:
+         """Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).
+
+         Amazon OpenSearch Serverless has restrictions on which vector engines
+         can be used. This method ensures the selected engine is compatible.
+
+         Args:
+             is_aoss: Whether the connection is to Amazon OpenSearch Serverless
+             engine: The selected vector search engine
+
+         Raises:
+             ValueError: If AOSS is used with an incompatible engine
+         """
+         if is_aoss and engine not in {"nmslib", "faiss"}:
+             msg = "Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines"
+             raise ValueError(msg)

-         if self.ingest_data:
-             self._add_documents_to_vector_store(opensearch)
+     def _is_aoss_enabled(self, http_auth: Any) -> bool:
+         """Determine if Amazon OpenSearch Serverless (AOSS) is being used.

-         return opensearch
+         Args:
+             http_auth: The HTTP authentication object

-     def _add_documents_to_vector_store(self, vector_store: "OpenSearchVectorSearch") -> None:
-         """Adds documents to the Vector Store."""
+         Returns:
+             True if AOSS is enabled, False otherwise
+         """
+         return http_auth is not None and hasattr(http_auth, "service") and http_auth.service == "aoss"
+
+     def _bulk_ingest_embeddings(
+         self,
+         client: OpenSearch,
+         index_name: str,
+         embeddings: list[list[float]],
+         texts: list[str],
+         metadatas: list[dict] | None = None,
+         ids: list[str] | None = None,
+         vector_field: str = "vector_field",
+         text_field: str = "text",
+         mapping: dict | None = None,
+         max_chunk_bytes: int | None = 1 * 1024 * 1024,
+         *,
+         is_aoss: bool = False,
+     ) -> list[str]:
+         """Efficiently ingest multiple documents with embeddings into OpenSearch.
+
+         This method uses bulk operations to insert documents with their vector
+         embeddings and metadata into the specified OpenSearch index.
+
+         Args:
+             client: OpenSearch client instance
+             index_name: Target index for document storage
+             embeddings: List of vector embeddings for each document
+             texts: List of document texts
+             metadatas: Optional metadata dictionaries for each document
+             ids: Optional document IDs (UUIDs generated if not provided)
+             vector_field: Field name for storing vector embeddings
+             text_field: Field name for storing document text
+             mapping: Optional index mapping configuration
+             max_chunk_bytes: Maximum size per bulk request chunk
+             is_aoss: Whether using Amazon OpenSearch Serverless
+
+         Returns:
+             List of document IDs that were successfully ingested
+         """
+         if not mapping:
+             mapping = {}
+
+         requests = []
+         return_ids = []
+
+         for i, text in enumerate(texts):
+             metadata = metadatas[i] if metadatas else {}
+             _id = ids[i] if ids else str(uuid.uuid4())
+             request = {
+                 "_op_type": "index",
+                 "_index": index_name,
+                 vector_field: embeddings[i],
+                 text_field: text,
+                 **metadata,
+             }
+             if is_aoss:
+                 request["id"] = _id
+             else:
+                 request["_id"] = _id
+             requests.append(request)
+             return_ids.append(_id)
+         if metadatas:
+             self.log(f"Sample metadata: {metadatas[0] if metadatas else {}}")
+         helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
+         return return_ids
+
+     # ---------- auth / client ----------
+     def _build_auth_kwargs(self) -> dict[str, Any]:
+         """Build authentication configuration for OpenSearch client.
+
+         Constructs the appropriate authentication parameters based on the
+         selected auth mode (basic username/password or JWT token).
+
+         Returns:
+             Dictionary containing authentication configuration
+
+         Raises:
+             ValueError: If required authentication parameters are missing
+         """
+         mode = (self.auth_mode or "basic").strip().lower()
+         if mode == "jwt":
+             token = (self.jwt_token or "").strip()
+             if not token:
+                 msg = "Auth Mode is 'jwt' but no jwt_token was provided."
+                 raise ValueError(msg)
+             header_name = (self.jwt_header or "Authorization").strip()
+             header_value = f"Bearer {token}" if self.bearer_prefix else token
+             return {"headers": {header_name: header_value}}
+         user = (self.username or "").strip()
+         pwd = (self.password or "").strip()
+         if not user or not pwd:
+             msg = "Auth Mode is 'basic' but username/password are missing."
+             raise ValueError(msg)
+         return {"http_auth": (user, pwd)}
+
+     def build_client(self) -> OpenSearch:
+         """Create and configure an OpenSearch client instance.
+
+         Returns:
+             Configured OpenSearch client ready for operations
+         """
+         auth_kwargs = self._build_auth_kwargs()
+         return OpenSearch(
+             hosts=[self.opensearch_url],
+             use_ssl=self.use_ssl,
+             verify_certs=self.verify_certs,
+             ssl_assert_hostname=False,
+             ssl_show_warn=False,
+             **auth_kwargs,
+         )
+
+     @check_cached_vector_store
+     def build_vector_store(self) -> OpenSearch:
+         # Return raw OpenSearch client as our “vector store.”
+         self.log(self.ingest_data)
+         client = self.build_client()
+         self._add_documents_to_vector_store(client=client)
+         return client
+
+     # ---------- ingest ----------
+     def _add_documents_to_vector_store(self, client: OpenSearch) -> None:
+         """Process and ingest documents into the OpenSearch vector store.
+
+         This method handles the complete document ingestion pipeline:
+         - Prepares document data and metadata
+         - Generates vector embeddings
+         - Creates appropriate index mappings
+         - Bulk inserts documents with vectors
+
+         Args:
+             client: OpenSearch client for performing operations
+         """
136
454
  self.ingest_data = self._prepare_ingest_data()
137
455
 
138
- documents = []
139
- for _input in self.ingest_data or []:
140
- if isinstance(_input, Data):
141
- documents.append(_input.to_lc_document())
142
- else:
143
- error_message = f"Expected Data object, got {type(_input)}"
144
- self.log(error_message)
145
- raise TypeError(error_message)
456
+ docs = self.ingest_data or []
457
+ if not docs:
458
+ self.log("No documents to ingest.")
459
+ return
460
+
461
+ # Extract texts and metadata from documents
462
+ texts = []
463
+ metadatas = []
464
+ # Process docs_metadata table input into a dict
465
+ additional_metadata = {}
466
+ if hasattr(self, "docs_metadata") and self.docs_metadata:
467
+ for item in self.docs_metadata:
468
+ if isinstance(item, dict) and "key" in item and "value" in item:
469
+ additional_metadata[item["key"]] = item["value"]
470
+
471
+ for doc_obj in docs:
472
+ data_copy = json.loads(doc_obj.model_dump_json())
473
+ text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)
474
+ texts.append(text)
475
+
476
+ # Merge additional metadata from table input
477
+ data_copy.update(additional_metadata)
478
+
479
+ metadatas.append(data_copy)
480
+ self.log(metadatas)
481
+ if not self.embedding:
482
+ msg = "Embedding handle is required to embed documents."
483
+ raise ValueError(msg)
484
+
485
+ # Generate embeddings
486
+ vectors = self.embedding.embed_documents(texts)
487
+
488
+ if not vectors:
489
+ self.log("No vectors generated from documents.")
490
+ return
491
+
492
+ # Get vector dimension for mapping
493
+ dim = len(vectors[0]) if vectors else 768 # default fallback
494
+
495
+ # Check for AOSS
496
+ auth_kwargs = self._build_auth_kwargs()
497
+ is_aoss = self._is_aoss_enabled(auth_kwargs.get("http_auth"))
498
+
499
+ # Validate engine with AOSS
500
+ engine = getattr(self, "engine", "jvector")
501
+ self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)
502
+
503
+ # Create mapping with proper KNN settings
504
+ space_type = getattr(self, "space_type", "l2")
505
+ ef_construction = getattr(self, "ef_construction", 512)
506
+ m = getattr(self, "m", 16)
507
+
508
+ mapping = self._default_text_mapping(
509
+ dim=dim,
510
+ engine=engine,
511
+ space_type=space_type,
512
+ ef_construction=ef_construction,
513
+ m=m,
514
+ vector_field=self.vector_field,
515
+ )
516
+
517
+ self.log(f"Indexing {len(texts)} documents into '{self.index_name}' with proper KNN mapping...")
518
+
519
+ # Use the LangChain-style bulk ingestion
520
+ return_ids = self._bulk_ingest_embeddings(
521
+ client=client,
522
+ index_name=self.index_name,
523
+ embeddings=vectors,
524
+ texts=texts,
525
+ metadatas=metadatas,
526
+ vector_field=self.vector_field,
527
+ text_field="text",
528
+ mapping=mapping,
529
+ is_aoss=is_aoss,
530
+ )
531
+ self.log(metadatas)
146
532
 
147
- if documents and self.embedding is not None:
148
- self.log(f"Adding {len(documents)} documents to the Vector Store.")
533
+ self.log(f"Successfully indexed {len(return_ids)} documents.")
534
+
535
+ # ---------- helpers for filters ----------
536
+ def _is_placeholder_term(self, term_obj: dict) -> bool:
537
+ # term_obj like {"filename": "__IMPOSSIBLE_VALUE__"}
538
+ return any(v == "__IMPOSSIBLE_VALUE__" for v in term_obj.values())
539
+
540
+ def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:
541
+ """Convert filter expressions into OpenSearch-compatible filter clauses.
542
+
543
+ This method accepts two filter formats and converts them to standardized
544
+ OpenSearch query clauses:
545
+
546
+ Format A - Explicit filters:
547
+ {"filter": [{"term": {"field": "value"}}, {"terms": {"field": ["val1", "val2"]}}],
548
+ "limit": 10, "score_threshold": 1.5}
549
+
550
+ Format B - Context-style mapping:
551
+ {"data_sources": ["file1.pdf"], "document_types": ["pdf"], "owners": ["user1"]}
552
+
553
+ Args:
554
+ filter_obj: Filter configuration dictionary or None
555
+
556
+ Returns:
557
+ List of OpenSearch filter clauses (term/terms objects)
558
+ Placeholder values with "__IMPOSSIBLE_VALUE__" are ignored
559
+ """
560
+ if not filter_obj:
561
+ return []
562
+
563
+ # If it is a string, try to parse it once
564
+ if isinstance(filter_obj, str):
149
565
  try:
150
- vector_store.add_documents(documents)
151
- except Exception as e:
152
- error_message = f"Error adding documents to Vector Store: {e}"
153
- self.log(error_message)
154
- raise RuntimeError(error_message) from e
155
- else:
156
- self.log("No documents to add to the Vector Store.")
566
+ filter_obj = json.loads(filter_obj)
567
+ except json.JSONDecodeError:
568
+ # Not valid JSON - treat as no filters
569
+ return []
570
+
571
+ # Case A: already an explicit list/dict under "filter"
572
+ if "filter" in filter_obj:
573
+ raw = filter_obj["filter"]
574
+ if isinstance(raw, dict):
575
+ raw = [raw]
576
+ explicit_clauses: list[dict] = []
577
+ for f in raw or []:
578
+ if "term" in f and isinstance(f["term"], dict) and not self._is_placeholder_term(f["term"]):
579
+ explicit_clauses.append(f)
580
+ elif "terms" in f and isinstance(f["terms"], dict):
581
+ field, vals = next(iter(f["terms"].items()))
582
+ if isinstance(vals, list) and len(vals) > 0:
583
+ explicit_clauses.append(f)
584
+ return explicit_clauses
157
585
 
586
+ # Case B: convert context-style maps into clauses
587
+ field_mapping = {
588
+ "data_sources": "filename",
589
+ "document_types": "mimetype",
590
+ "owners": "owner",
591
+ }
592
+ context_clauses: list[dict] = []
593
+ for k, values in filter_obj.items():
594
+ if not isinstance(values, list):
595
+ continue
596
+ field = field_mapping.get(k, k)
597
+ if len(values) == 0:
598
+ # Match-nothing placeholder (kept to mirror your tool semantics)
599
+ context_clauses.append({"term": {field: "__IMPOSSIBLE_VALUE__"}})
600
+ elif len(values) == 1:
601
+ if values[0] != "__IMPOSSIBLE_VALUE__":
602
+ context_clauses.append({"term": {field: values[0]}})
603
+ else:
604
+ context_clauses.append({"terms": {field: values}})
605
+ return context_clauses
606
+
607
+     # ---------- search (single hybrid path matching your tool) ----------
      def search(self, query: str | None = None) -> list[dict[str, Any]]:
-         """Search for similar documents in the vector store or retrieve all documents if no query is provided."""
-         try:
-             vector_store = self.build_vector_store()
+         """Perform hybrid search combining vector similarity and keyword matching.
+
+         This method executes a sophisticated search that combines:
+         - K-nearest neighbor (KNN) vector similarity search (70% weight)
+         - Multi-field keyword search with fuzzy matching (30% weight)
+         - Optional filtering and score thresholds
+         - Aggregations for faceted search results
+
+         Args:
+             query: Search query string (used for both vector embedding and keyword search)

-             query = query or ""
+         Returns:
+             List of search results with page_content, metadata, and relevance scores
+
+         Raises:
+             ValueError: If embedding component is not provided or filter JSON is invalid
+         """
+         logger.info(self.ingest_data)
+         client = self.build_client()
+         q = (query or "").strip()
+
+         # Parse optional filter expression (can be either A or B shape; see _coerce_filter_clauses)
+         filter_obj = None
+         if getattr(self, "filter_expression", "") and self.filter_expression.strip():
+             try:
+                 filter_obj = json.loads(self.filter_expression)
+             except json.JSONDecodeError as e:
+                 msg = f"Invalid filter_expression JSON: {e}"
+                 raise ValueError(msg) from e

-             if self.hybrid_search_query.strip():
-                 try:
-                     hybrid_query = json.loads(self.hybrid_search_query)
-                 except json.JSONDecodeError as e:
-                     error_message = f"Invalid hybrid search query JSON: {e}"
-                     self.log(error_message)
-                     raise ValueError(error_message) from e
+         if not self.embedding:
+             msg = "Embedding is required to run hybrid search (KNN + keyword)."
+             raise ValueError(msg)

-                 results = vector_store.client.search(index=self.index_name, body=hybrid_query)
+         # Embed the query
+         vec = self.embedding.embed_query(q)

-                 processed_results = []
-                 for hit in results.get("hits", {}).get("hits", []):
-                     source = hit.get("_source", {})
-                     text = source.get("text", "")
-                     metadata = source.get("metadata", {})
+         # Build filter clauses (accept both shapes)
+         filter_clauses = self._coerce_filter_clauses(filter_obj)

-                     if isinstance(text, dict):
-                         text = text.get("text", "")
+         # Respect the tool's limit/threshold defaults
+         limit = (filter_obj or {}).get("limit", self.number_of_results)
+         score_threshold = (filter_obj or {}).get("score_threshold", 0)

-                     processed_results.append(
+         # Build the same hybrid body as your SearchService
+         body = {
+             "query": {
+                 "bool": {
+                     "should": [
                          {
-                             "page_content": text,
-                             "metadata": metadata,
-                         }
-                     )
-                 return processed_results
-
-             search_kwargs = {"k": self.number_of_results}
-             search_type = self.search_type.lower()
-
-             if search_type == "similarity":
-                 results = vector_store.similarity_search(query, **search_kwargs)
-                 return [{"page_content": doc.page_content, "metadata": doc.metadata} for doc in results]
-             if search_type == "similarity_score_threshold":
-                 search_kwargs["score_threshold"] = self.search_score_threshold
-                 results = vector_store.similarity_search_with_relevance_scores(query, **search_kwargs)
-                 return [
-                     {
-                         "page_content": doc.page_content,
-                         "metadata": doc.metadata,
-                         "score": score,
-                     }
-                     for doc, score in results
-                 ]
-             if search_type == "mmr":
-                 results = vector_store.max_marginal_relevance_search(query, **search_kwargs)
-                 return [{"page_content": doc.page_content, "metadata": doc.metadata} for doc in results]
+                             "knn": {
+                                 self.vector_field: {
+                                     "vector": vec,
+                                     "k": 10,  # fixed to match the tool
+                                     "boost": 0.7,
+                                 }
+                             }
+                         },
+                         {
+                             "multi_match": {
+                                 "query": q,
+                                 "fields": ["text^2", "filename^1.5"],
+                                 "type": "best_fields",
+                                 "fuzziness": "AUTO",
+                                 "boost": 0.3,
+                             }
+                         },
+                     ],
+                     "minimum_should_match": 1,
+                 }
+             },
+             "aggs": {
+                 "data_sources": {"terms": {"field": "filename", "size": 20}},
+                 "document_types": {"terms": {"field": "mimetype", "size": 10}},
+                 "owners": {"terms": {"field": "owner", "size": 10}},
+             },
+             "_source": [
+                 "filename",
+                 "mimetype",
+                 "page",
+                 "text",
+                 "source_url",
+                 "owner",
+                 "allowed_users",
+                 "allowed_groups",
+             ],
+             "size": limit,
+         }
+         if filter_clauses:
+             body["query"]["bool"]["filter"] = filter_clauses

-         except Exception as e:
-             error_message = f"Error during search: {e}"
-             self.log(error_message)
-             raise RuntimeError(error_message) from e
+         if isinstance(score_threshold, (int, float)) and score_threshold > 0:
+             # top-level min_score (matches your tool)
+             body["min_score"] = score_threshold

-         error_message = f"Error during search. Invalid search type: {self.search_type}"
-         self.log(error_message)
-         raise ValueError(error_message)
+         resp = client.search(index=self.index_name, body=body)
+         hits = resp.get("hits", {}).get("hits", [])
+         return [
+             {
+                 "page_content": hit["_source"].get("text", ""),
+                 "metadata": {k: v for k, v in hit["_source"].items() if k != "text"},
+                 "score": hit.get("_score"),
+             }
+             for hit in hits
+         ]

      def search_documents(self) -> list[Data]:
-         """Search for documents in the vector store based on the search input.
+         """Search documents and return results as Data objects.
+
+         This is the main interface method that performs the search using the
+         configured search_query and returns results in Langflow's Data format.
+
+         Returns:
+             List of Data objects containing search results with text and metadata

-         If no search input is provided, retrieve all documents.
+         Raises:
+             Exception: If search operation fails
          """
          try:
-             query = self.search_query.strip() if self.search_query else None
-             results = self.search(query)
-             retrieved_data = [
-                 Data(
-                     file_path=result["metadata"].get("file_path", ""),
-                     text=result["page_content"],
-                 )
-                 for result in results
-             ]
+             raw = self.search(self.search_query or "")
+             return [Data(text=hit["page_content"], **hit["metadata"]) for hit in raw]
+             self.log(self.ingest_data)
          except Exception as e:
-             error_message = f"Error during document search: {e}"
-             self.log(error_message)
-             raise RuntimeError(error_message) from e
+             self.log(f"search_documents error: {e}")
+             raise
+
+     # -------- dynamic UI handling (auth switch) --------
+     async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:
+         """Dynamically update component configuration based on field changes.
+
+         This method handles real-time UI updates, particularly for authentication
+         mode changes that show/hide relevant input fields.
+
+         Args:
+             build_config: Current component configuration
+             field_value: New value for the changed field
+             field_name: Name of the field that changed
+
+         Returns:
+             Updated build configuration with appropriate field visibility
+         """
+         try:
+             if field_name == "auth_mode":
+                 mode = (field_value or "basic").strip().lower()
+                 is_basic = mode == "basic"
+                 is_jwt = mode == "jwt"
+
+                 build_config["username"]["show"] = is_basic
+                 build_config["password"]["show"] = is_basic
+
+                 build_config["jwt_token"]["show"] = is_jwt
+                 build_config["jwt_header"]["show"] = is_jwt
+                 build_config["bearer_prefix"]["show"] = is_jwt
+
+                 build_config["username"]["required"] = is_basic
+                 build_config["password"]["required"] = is_basic
+
+                 build_config["jwt_token"]["required"] = is_jwt
+                 build_config["jwt_header"]["required"] = is_jwt
+                 build_config["bearer_prefix"]["required"] = False
+
+                 if is_basic:
+                     build_config["jwt_token"]["value"] = ""
+
+                 return build_config
+
+         except (KeyError, ValueError) as e:
+             self.log(f"update_build_config error: {e}")

-         self.status = retrieved_data
-         return retrieved_data
+         return build_config
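
A note on the reworked search inputs: the old hybrid_search_query JSON is replaced by filter_expression, which _coerce_filter_clauses normalizes into OpenSearch term/terms clauses before they are attached to the hybrid KNN-plus-keyword query. As a rough sketch of the two accepted shapes (values are illustrative; the filename, mimetype, and owner field names come from the component's built-in field_mapping):

    import json

    # Format 1 - explicit OpenSearch filter clauses, plus optional limit/score_threshold.
    explicit = {
        "filter": [
            {"term": {"filename": "doc.pdf"}},
            {"terms": {"owner": ["user1", "user2"]}},
        ],
        "limit": 10,
        "score_threshold": 1.6,
    }

    # Format 2 - context-style mapping; keys are translated via field_mapping
    # (data_sources -> filename, document_types -> mimetype, owners -> owner).
    context_style = {
        "data_sources": ["file.pdf"],
        "document_types": ["application/pdf"],
        "owners": ["user123"],
    }

    # Either shape goes into the "Search Filters (JSON)" field as a JSON string.
    filter_expression = json.dumps(explicit)

    # Per the docstring in the diff, "__IMPOSSIBLE_VALUE__" entries are dropped,
    # and an empty list in Format 2 becomes a match-nothing placeholder clause.
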
lfx/logging/__init__.py (new file)
@@ -0,0 +1,11 @@
+ """Backwards compatibility module for lfx.logging.
+
+ This module provides backwards compatibility for code that imports from lfx.logging.
+ All functionality has been moved to lfx.log.
+ """
+
+ # Re-export everything from lfx.log for backwards compatibility
+ from lfx.log.logger import configure, logger
+
+ # Maintain the same __all__ exports
+ __all__ = ["configure", "logger"]
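
The net effect of this shim is that the old import path keeps working after the move to lfx.log. A minimal sanity check, assuming the wheel is installed and that lfx.log re-exports the same logger object as lfx.log.logger:

    # Old path (this compatibility module) and new path should agree.
    from lfx.log import logger as new_logger
    from lfx.logging import logger as compat_logger

    assert compat_logger is new_logger  # same re-exported object
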
lfx_nightly-0.1.12.dev13.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lfx-nightly
- Version: 0.1.12.dev12
+ Version: 0.1.12.dev13
  Summary: Langflow Executor - A lightweight CLI tool for executing and serving Langflow AI flows
  Author-email: Gabriel Luiz Freitas Almeida <gabriel@langflow.org>
  Requires-Python: <3.14,>=3.10
lfx_nightly-0.1.12.dev13.dist-info/RECORD
@@ -264,7 +264,7 @@ lfx/components/duckduckgo/__init__.py,sha256=Y4zaOLVOKsD_qwF7KRLek1pcaKKHa6lGUHO
  lfx/components/duckduckgo/duck_duck_go_search_run.py,sha256=LlIqWkOJPIde1zEzin6XArYLjkg4ZBNi_AEZLJkfOQo,3074
  lfx/components/elastic/__init__.py,sha256=tEqQ9UwUyeGttqGXOS2Or7Y50rQnNRWySfMx8u4fV8U,1126
  lfx/components/elastic/elasticsearch.py,sha256=WcBi8THcOzopZeYOQeEoHxsZkACHk4R3MKhSEYGxnfY,9773
- lfx/components/elastic/opensearch.py,sha256=P8Eq4KsjHT8b7iOUOKMFRwOLwgRfIWfxIHLD0GJsw24,9080
+ lfx/components/elastic/opensearch.py,sha256=6uyJdHsG-IvfMTXgmRD8O7fCZc82zDguCUCC-na3kv4,29446
  lfx/components/embeddings/__init__.py,sha256=WP7MRGihB0vkSmqKlBhi2n-ZLMMbwboUbKjQRpIVVCQ,1136
  lfx/components/embeddings/similarity.py,sha256=EqL8p8g9fPTpMVnVNB3hBpHgZZZg3TbQN9B20vHDnRo,2932
  lfx/components/embeddings/text_embedder.py,sha256=oYriXXuYKU_kMW-pL0Cuk--4G5CVD0bMlfes4Ge4zIQ,2450
@@ -620,6 +620,7 @@ lfx/load/load.py,sha256=mpQG2RV2ZOysShEOguWKdnQI9TUub1Ds5j89ZbwiQhA,10451
  lfx/load/utils.py,sha256=qa8aoMLW-X8FO8xVz3YVHQwjTSJYbYr_AOQAAp3smlc,3705
  lfx/log/__init__.py,sha256=UATLSm1Fp9rVclAXP00LKQzzYKcaboVSuWNujlRR6P4,119
  lfx/log/logger.py,sha256=_KqyTe1JHI6y4TFyy_WjH8J-vTG4qOhfKN_RWCnGvoA,13369
+ lfx/logging/__init__.py,sha256=X5tXF5e1hc62adprRPLtKeaqm8-tpl6loXsxbh9IO-Q,367
  lfx/memory/__init__.py,sha256=XR7-FSeIxikpi6HSo1bYEXY4ua_1G6oHufD1OCRjynw,2531
  lfx/memory/stubs.py,sha256=kR6TRI2t6rPvA5Pja5XPC4yvKRBFBuJfdI0hJL8vfwU,9924
  lfx/processing/__init__.py,sha256=jERZg6it9mhOzrbTAt9YtakSNXPSjUXFh5MfKBN48wA,41
@@ -708,7 +709,7 @@ lfx/utils/schemas.py,sha256=NbOtVQBrn4d0BAu-0H_eCTZI2CXkKZlRY37XCSmuJwc,3865
  lfx/utils/util.py,sha256=xGR32XDRr_TtruhjnXfI7lEWmk-vgywHAy3kz5SBowc,15725
  lfx/utils/util_strings.py,sha256=nU_IcdphNaj6bAPbjeL-c1cInQPfTBit8mp5Y57lwQk,1686
  lfx/utils/version.py,sha256=cHpbO0OJD2JQAvVaTH_6ibYeFbHJV0QDHs_YXXZ-bT8,671
- lfx_nightly-0.1.12.dev12.dist-info/METADATA,sha256=kOUtFVWmgxO487sp0ZXsC1vuli0LMgaLD2obh8NA3do,8068
- lfx_nightly-0.1.12.dev12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- lfx_nightly-0.1.12.dev12.dist-info/entry_points.txt,sha256=1724p3RHDQRT2CKx_QRzEIa7sFuSVO0Ux70YfXfoMT4,42
- lfx_nightly-0.1.12.dev12.dist-info/RECORD,,
+ lfx_nightly-0.1.12.dev13.dist-info/METADATA,sha256=OrTDiOi81zmo0ZarKMPRW4fEIYv2ZQgQgoQtUBM6d4g,8068
+ lfx_nightly-0.1.12.dev13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ lfx_nightly-0.1.12.dev13.dist-info/entry_points.txt,sha256=1724p3RHDQRT2CKx_QRzEIa7sFuSVO0Ux70YfXfoMT4,42
+ lfx_nightly-0.1.12.dev13.dist-info/RECORD,,