langroid 0.1.85__py3-none-any.whl → 0.1.219__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. langroid/__init__.py +95 -0
  2. langroid/agent/__init__.py +40 -0
  3. langroid/agent/base.py +222 -91
  4. langroid/agent/batch.py +264 -0
  5. langroid/agent/callbacks/chainlit.py +608 -0
  6. langroid/agent/chat_agent.py +247 -101
  7. langroid/agent/chat_document.py +41 -4
  8. langroid/agent/openai_assistant.py +842 -0
  9. langroid/agent/special/__init__.py +50 -0
  10. langroid/agent/special/doc_chat_agent.py +837 -141
  11. langroid/agent/special/lance_doc_chat_agent.py +258 -0
  12. langroid/agent/special/lance_rag/__init__.py +9 -0
  13. langroid/agent/special/lance_rag/critic_agent.py +136 -0
  14. langroid/agent/special/lance_rag/lance_rag_task.py +80 -0
  15. langroid/agent/special/lance_rag/query_planner_agent.py +180 -0
  16. langroid/agent/special/lance_tools.py +44 -0
  17. langroid/agent/special/neo4j/__init__.py +0 -0
  18. langroid/agent/special/neo4j/csv_kg_chat.py +174 -0
  19. langroid/agent/special/neo4j/neo4j_chat_agent.py +370 -0
  20. langroid/agent/special/neo4j/utils/__init__.py +0 -0
  21. langroid/agent/special/neo4j/utils/system_message.py +46 -0
  22. langroid/agent/special/relevance_extractor_agent.py +127 -0
  23. langroid/agent/special/retriever_agent.py +32 -198
  24. langroid/agent/special/sql/__init__.py +11 -0
  25. langroid/agent/special/sql/sql_chat_agent.py +47 -23
  26. langroid/agent/special/sql/utils/__init__.py +22 -0
  27. langroid/agent/special/sql/utils/description_extractors.py +95 -46
  28. langroid/agent/special/sql/utils/populate_metadata.py +28 -21
  29. langroid/agent/special/table_chat_agent.py +43 -9
  30. langroid/agent/task.py +475 -122
  31. langroid/agent/tool_message.py +75 -13
  32. langroid/agent/tools/__init__.py +13 -0
  33. langroid/agent/tools/duckduckgo_search_tool.py +66 -0
  34. langroid/agent/tools/google_search_tool.py +11 -0
  35. langroid/agent/tools/metaphor_search_tool.py +67 -0
  36. langroid/agent/tools/recipient_tool.py +16 -29
  37. langroid/agent/tools/run_python_code.py +60 -0
  38. langroid/agent/tools/sciphi_search_rag_tool.py +79 -0
  39. langroid/agent/tools/segment_extract_tool.py +36 -0
  40. langroid/cachedb/__init__.py +9 -0
  41. langroid/cachedb/base.py +22 -2
  42. langroid/cachedb/momento_cachedb.py +26 -2
  43. langroid/cachedb/redis_cachedb.py +78 -11
  44. langroid/embedding_models/__init__.py +34 -0
  45. langroid/embedding_models/base.py +21 -2
  46. langroid/embedding_models/models.py +120 -18
  47. langroid/embedding_models/protoc/embeddings.proto +19 -0
  48. langroid/embedding_models/protoc/embeddings_pb2.py +33 -0
  49. langroid/embedding_models/protoc/embeddings_pb2.pyi +50 -0
  50. langroid/embedding_models/protoc/embeddings_pb2_grpc.py +79 -0
  51. langroid/embedding_models/remote_embeds.py +153 -0
  52. langroid/language_models/__init__.py +45 -0
  53. langroid/language_models/azure_openai.py +80 -27
  54. langroid/language_models/base.py +117 -12
  55. langroid/language_models/config.py +5 -0
  56. langroid/language_models/openai_assistants.py +3 -0
  57. langroid/language_models/openai_gpt.py +558 -174
  58. langroid/language_models/prompt_formatter/__init__.py +15 -0
  59. langroid/language_models/prompt_formatter/base.py +4 -6
  60. langroid/language_models/prompt_formatter/hf_formatter.py +135 -0
  61. langroid/language_models/utils.py +18 -21
  62. langroid/mytypes.py +25 -8
  63. langroid/parsing/__init__.py +46 -0
  64. langroid/parsing/document_parser.py +260 -63
  65. langroid/parsing/image_text.py +32 -0
  66. langroid/parsing/parse_json.py +143 -0
  67. langroid/parsing/parser.py +122 -59
  68. langroid/parsing/repo_loader.py +114 -52
  69. langroid/parsing/search.py +68 -63
  70. langroid/parsing/spider.py +3 -2
  71. langroid/parsing/table_loader.py +44 -0
  72. langroid/parsing/url_loader.py +59 -11
  73. langroid/parsing/urls.py +85 -37
  74. langroid/parsing/utils.py +298 -4
  75. langroid/parsing/web_search.py +73 -0
  76. langroid/prompts/__init__.py +11 -0
  77. langroid/prompts/chat-gpt4-system-prompt.md +68 -0
  78. langroid/prompts/prompts_config.py +1 -1
  79. langroid/utils/__init__.py +17 -0
  80. langroid/utils/algorithms/__init__.py +3 -0
  81. langroid/utils/algorithms/graph.py +103 -0
  82. langroid/utils/configuration.py +36 -5
  83. langroid/utils/constants.py +4 -0
  84. langroid/utils/globals.py +2 -2
  85. langroid/utils/logging.py +2 -5
  86. langroid/utils/output/__init__.py +21 -0
  87. langroid/utils/output/printing.py +47 -1
  88. langroid/utils/output/status.py +33 -0
  89. langroid/utils/pandas_utils.py +30 -0
  90. langroid/utils/pydantic_utils.py +616 -2
  91. langroid/utils/system.py +98 -0
  92. langroid/vector_store/__init__.py +40 -0
  93. langroid/vector_store/base.py +203 -6
  94. langroid/vector_store/chromadb.py +59 -32
  95. langroid/vector_store/lancedb.py +463 -0
  96. langroid/vector_store/meilisearch.py +10 -7
  97. langroid/vector_store/momento.py +262 -0
  98. langroid/vector_store/qdrantdb.py +104 -22
  99. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/METADATA +329 -149
  100. langroid-0.1.219.dist-info/RECORD +127 -0
  101. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/WHEEL +1 -1
  102. langroid/agent/special/recipient_validator_agent.py +0 -157
  103. langroid/parsing/json.py +0 -64
  104. langroid/utils/web/selenium_login.py +0 -36
  105. langroid-0.1.85.dist-info/RECORD +0 -94
  106. /langroid/{scripts → agent/callbacks}/__init__.py +0 -0
  107. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/LICENSE +0 -0
langroid/utils/pydantic_utils.py
@@ -1,8 +1,622 @@
-from typing import Type
+import logging
+from contextlib import contextmanager
+from typing import (
+    Any,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    get_args,
+    get_origin,
+    no_type_check,
+)
 
-from pydantic import BaseModel
+import numpy as np
+import pandas as pd
+from pydantic import BaseModel, ValidationError, create_model
+
+from langroid.mytypes import DocMetaData, Document
+
+logger = logging.getLogger(__name__)
 
 
 def has_field(model_class: Type[BaseModel], field_name: str) -> bool:
     """Check if a Pydantic model class has a field with the given name."""
     return field_name in model_class.__fields__
+
+
+def _recursive_purge_dict_key(d: Dict[str, Any], k: str) -> None:
+    """Remove key `k` from dict `d` recursively, but only from sub-dicts
+    that also contain a "type" key (i.e. JSON-schema-style nodes)."""
+    if isinstance(d, dict):
+        for key in list(d.keys()):
+            if key == k and "type" in d.keys():
+                del d[key]
+            else:
+                _recursive_purge_dict_key(d[key], k)
+
+
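
# Minimal usage sketch of _recursive_purge_dict_key (illustrative, not part of
# the diff above): the "type" guard means only schema-like nodes are purged.
schema = {
    "type": "object",
    "title": "MyModel",
    "properties": {"x": {"type": "integer", "title": "X"}},
}
_recursive_purge_dict_key(schema, "title")
# schema is now:
# {"type": "object", "properties": {"x": {"type": "integer"}}}
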
+@no_type_check
+def _flatten_pydantic_model_ignore_defaults(
+    model: Type[BaseModel],
+    base_model: Type[BaseModel] = BaseModel,
+) -> Type[BaseModel]:
+    """
+    Given a possibly nested Pydantic class, return a flattened version of it,
+    by constructing top-level fields whose names are formed from the path
+    through the nested structure, separated by double underscores.
+
+    This version ignores inherited defaults, so it is incomplete;
+    it is retained because it is simpler and may be useful in some cases.
+    The full version is `flatten_pydantic_model`, below.
+
+    Args:
+        model (Type[BaseModel]): The Pydantic model to flatten.
+        base_model (Type[BaseModel], optional): The base model to use for the
+            flattened model. Defaults to BaseModel.
+
+    Returns:
+        Type[BaseModel]: The flattened Pydantic model.
+    """
+
+    flattened_fields: Dict[str, Tuple[Any, ...]] = {}
+    models_to_process = [(model, "")]
+
+    while models_to_process:
+        current_model, current_prefix = models_to_process.pop()
+
+        for name, field in current_model.__annotations__.items():
+            if issubclass(field, BaseModel):
+                new_prefix = (
+                    f"{current_prefix}{name}__" if current_prefix else f"{name}__"
+                )
+                models_to_process.append((field, new_prefix))
+            else:
+                flattened_name = f"{current_prefix}{name}"
+                flattened_fields[flattened_name] = (field, ...)
+
+    return create_model(
+        "FlatModel",
+        __base__=base_model,
+        **flattened_fields,
+    )
+
+
+def flatten_pydantic_model(
+    model: Type[BaseModel],
+    base_model: Type[BaseModel] = BaseModel,
+) -> Type[BaseModel]:
+    """
+    Given a possibly nested Pydantic class, return a flattened version of it,
+    by constructing top-level fields whose names are formed from the path
+    through the nested structure, separated by double underscores.
+
+    Args:
+        model (Type[BaseModel]): The Pydantic model to flatten.
+        base_model (Type[BaseModel], optional): The base model to use for the
+            flattened model. Defaults to BaseModel.
+
+    Returns:
+        Type[BaseModel]: The flattened Pydantic model.
+    """
+
+    flattened_fields: Dict[str, Any] = {}
+    models_to_process = [(model, "")]
+
+    while models_to_process:
+        current_model, current_prefix = models_to_process.pop()
+
+        for name, field in current_model.__fields__.items():
+            if isinstance(field.outer_type_, type) and issubclass(
+                field.outer_type_, BaseModel
+            ):
+                new_prefix = (
+                    f"{current_prefix}{name}__" if current_prefix else f"{name}__"
+                )
+                models_to_process.append((field.outer_type_, new_prefix))
+            else:
+                flattened_name = f"{current_prefix}{name}"
+
+                if field.default_factory is not None:
+                    flattened_fields[flattened_name] = (
+                        field.outer_type_,
+                        field.default_factory,
+                    )
+                elif field.default is not None:
+                    flattened_fields[flattened_name] = (
+                        field.outer_type_,
+                        field.default,
+                    )
+                else:
+                    flattened_fields[flattened_name] = (field.outer_type_, ...)
+
+    return create_model("FlatModel", __base__=base_model, **flattened_fields)
+
+
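
# Sketch of flatten_pydantic_model on a nested model (illustrative; assumes
# pydantic v1, which this module targets, and the module path
# langroid.utils.pydantic_utils):
from pydantic import BaseModel
from langroid.utils.pydantic_utils import flatten_pydantic_model

class Address(BaseModel):
    city: str

class Person(BaseModel):
    name: str
    address: Address

Flat = flatten_pydantic_model(Person)
print(list(Flat.__fields__.keys()))  # ['name', 'address__city']
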
+def get_field_names(model: Type[BaseModel]) -> List[str]:
+    """Get all field names from a possibly nested Pydantic model."""
+    mdl = flatten_pydantic_model(model)
+    fields = list(mdl.__fields__.keys())
+    # fields may be like a__b__c, so we only want the last part
+    return [f.split("__")[-1] for f in fields]
+
+
+def generate_simple_schema(
+    model: Type[BaseModel], exclude: List[str] = []
+) -> Dict[str, Any]:
+    """
+    Generates a JSON schema for a Pydantic model,
+    with options to exclude specific fields.
+
+    This function traverses the Pydantic model's fields, including nested models,
+    to generate a dictionary representing the JSON schema. Fields specified in
+    the exclude list will not be included in the generated schema.
+
+    Args:
+        model (Type[BaseModel]): The Pydantic model class to generate the schema for.
+        exclude (List[str]): A list of string field names to be excluded from the
+            generated schema. Defaults to an empty list.
+
+    Returns:
+        Dict[str, Any]: A dictionary representing the JSON schema of the provided
+            model, with the specified fields excluded.
+    """
+    if hasattr(model, "__fields__"):
+        output: Dict[str, Any] = {}
+        for field_name, field in model.__fields__.items():
+            if field_name in exclude:
+                continue  # Skip excluded fields
+
+            field_type = field.type_
+            if issubclass(field_type, BaseModel):
+                # Recursively generate schema for nested models
+                output[field_name] = generate_simple_schema(field_type, exclude)
+            else:
+                # Represent the type as a string here
+                output[field_name] = {"type": field_type.__name__}
+        return output
+    else:
+        # Non-model type, return a simplified representation
+        return {"type": model.__name__}
+
+
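
# Sketch of generate_simple_schema output (illustrative; Inner/Outer are
# hypothetical models, and exclusion applies at every nesting level):
class Inner(BaseModel):
    x: int

class Outer(BaseModel):
    inner: Inner
    note: str

generate_simple_schema(Outer, exclude=["note"])
# -> {"inner": {"x": {"type": "int"}}}
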
+def flatten_pydantic_instance(
+    instance: BaseModel,
+    prefix: str = "",
+    force_str: bool = False,
+) -> Dict[str, Any]:
+    """
+    Given a possibly nested Pydantic instance, return a flattened version of it,
+    as a dict where nested traversal paths are translated to keys of the form
+    a__b__c.
+
+    Args:
+        instance (BaseModel): The Pydantic instance to flatten.
+        prefix (str, optional): The prefix to use for the top-level fields.
+        force_str (bool, optional): Whether to force all values to be strings.
+
+    Returns:
+        Dict[str, Any]: The flattened dict.
+    """
+    flat_data: Dict[str, Any] = {}
+    for name, value in instance.dict().items():
+        # Assuming a nested Pydantic model will appear as a dict here
+        if isinstance(value, dict):
+            nested_flat_data = flatten_pydantic_instance(
+                instance.__fields__[name].type_(**value),
+                prefix=f"{prefix}{name}__",
+                force_str=force_str,
+            )
+            flat_data.update(nested_flat_data)
+        else:
+            flat_data[f"{prefix}{name}"] = str(value) if force_str else value
+    return flat_data
+
+
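
# Sketch: flattening an instance yields a__b__c style keys (illustrative,
# reusing the Person/Address models from the earlier sketch):
person = Person(name="Ada", address=Address(city="London"))
flatten_pydantic_instance(person)
# -> {"name": "Ada", "address__city": "London"}
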
+def extract_fields(doc: BaseModel, fields: List[str]) -> Dict[str, Any]:
+    """
+    Extract specified fields from a Pydantic object.
+    Supports dotted field names, e.g. "metadata.author".
+    Dotted fields are matched exactly against the corresponding path;
+    non-dotted fields are matched against the last part of the path.
+    Clashes are ignored.
+
+    Args:
+        doc (BaseModel): The Pydantic object.
+        fields (List[str]): The list of fields to extract.
+
+    Returns:
+        Dict[str, Any]: A dictionary of field names and values.
+    """
+
+    def get_value(obj: BaseModel, path: str) -> Any | None:
+        for part in path.split("."):
+            if hasattr(obj, part):
+                obj = getattr(obj, part)
+            else:
+                return None
+        return obj
+
+    def traverse(obj: BaseModel, result: Dict[str, Any], prefix: str = "") -> None:
+        for k, v in obj.__dict__.items():
+            key = f"{prefix}.{k}" if prefix else k
+            if isinstance(v, BaseModel):
+                traverse(v, result, key)
+            else:
+                result[key] = v
+
+    result: Dict[str, Any] = {}
+
+    # Extract values for dotted field names, using the last part as the key
+    for field in fields:
+        if "." in field:
+            value = get_value(doc, field)
+            if value is not None:
+                key = field.split(".")[-1]
+                result[key] = value
+
+    # Traverse the object to get non-dotted fields
+    all_fields: Dict[str, Any] = {}
+    traverse(doc, all_fields)
+
+    # Add non-dotted fields to the result,
+    # avoiding overwrites if already present from dotted names
+    for field in [f for f in fields if "." not in f]:
+        for key, value in all_fields.items():
+            if key.split(".")[-1] == field and field not in result:
+                result[field] = value
+
+    return result
+
+
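
# Sketch of extract_fields with dotted and plain names (illustrative,
# reusing the Person/Address instance from above):
extract_fields(person, ["address.city", "name"])
# -> {"city": "London", "name": "Ada"}
# The dotted path is matched exactly; its last part ("city") becomes the key.
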
+def nested_dict_from_flat(
+    flat_data: Dict[str, Any],
+    sub_dict: str = "",
+) -> Dict[str, Any]:
+    """
+    Given a flattened version of a nested dict, reconstruct the nested dict.
+    Field names in the flattened dict are assumed to be of the form
+    "field1__field2__field3", going from the top level down.
+
+    Args:
+        flat_data (Dict[str, Any]): The flattened dict.
+        sub_dict (str, optional): The name of the sub-dict to extract from the
+            flattened dict. Defaults to "" (extract the whole dict).
+
+    Returns:
+        Dict[str, Any]: The nested dict.
+    """
+    nested_data: Dict[str, Any] = {}
+    for key, value in flat_data.items():
+        if sub_dict != "" and not key.startswith(sub_dict + "__"):
+            continue
+        keys = key.split("__")
+        d = nested_data
+        for k in keys[:-1]:
+            d = d.setdefault(k, {})
+        d[keys[-1]] = value
+    if sub_dict != "":  # e.g. "payload"
+        nested_data = nested_data[sub_dict]
+    return nested_data
+
+
+def pydantic_obj_from_flat_dict(
+    flat_data: Dict[str, Any],
+    model: Type[BaseModel],
+    sub_dict: str = "",
+) -> BaseModel:
+    """Flattened dict with a__b__c style keys -> nested dict -> Pydantic object."""
+    nested_data = nested_dict_from_flat(flat_data, sub_dict)
+    return model(**nested_data)
+
+
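
# Sketch: a flat dict with a__b__c keys round-trips back to a Pydantic
# object (illustrative, reusing Person/Address from above):
flat = {"name": "Ada", "address__city": "London"}
obj = pydantic_obj_from_flat_dict(flat, Person)
# obj == Person(name="Ada", address=Address(city="London"))
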
+def clean_schema(model: Type[BaseModel], excludes: List[str] = []) -> Dict[str, Any]:
+    """
+    Generate a simple schema for a given Pydantic model,
+    including inherited fields, with an option to exclude certain fields.
+    Handles cases where fields are Lists or other generic types, and includes
+    field descriptions if available.
+
+    Args:
+        model (Type[BaseModel]): The Pydantic model class.
+        excludes (List[str]): A list of field names to exclude.
+
+    Returns:
+        Dict[str, Any]: A dictionary representing the simple schema.
+    """
+    schema = {}
+
+    for field_name, field_info in model.__fields__.items():
+        if field_name in excludes:
+            continue
+
+        field_type = field_info.outer_type_
+        description = field_info.field_info.description or ""
+
+        # Handle generic types like List[...]
+        if get_origin(field_type):
+            inner_types = get_args(field_type)
+            inner_type_names = [
+                t.__name__ if hasattr(t, "__name__") else str(t) for t in inner_types
+            ]
+            field_type_str = (
+                f"{get_origin(field_type).__name__}" f'[{", ".join(inner_type_names)}]'
+            )
+            schema[field_name] = {"type": field_type_str, "description": description}
+        elif issubclass(field_type, BaseModel):
+            # Directly use the nested model's schema,
+            # integrating it into the current level
+            nested_schema = clean_schema(field_type, excludes)
+            schema[field_name] = {**nested_schema, "description": description}
+        else:
+            # For basic types, use 'type'
+            schema[field_name] = {
+                "type": field_type.__name__,
+                "description": description,
+            }
+
+    return schema
+
+
+@contextmanager
+def temp_update(
+    pydantic_object: BaseModel, updates: Dict[str, Any]
+) -> Generator[None, None, None]:
+    original_values = {}
+    try:
+        for field, value in updates.items():
+            if hasattr(pydantic_object, field):
+                # Save original value
+                original_values[field] = getattr(pydantic_object, field)
+                setattr(pydantic_object, field, value)
+            else:
+                # Raise error for non-existent field
+                raise AttributeError(
+                    f"The field '{field}' does not exist in the "
+                    f"Pydantic model '{pydantic_object.__class__.__name__}'."
+                )
+        yield
+    except ValidationError as e:
+        # Handle validation error
+        print(f"Validation error: {e}")
+    finally:
+        # Restore original values
+        for field, value in original_values.items():
+            setattr(pydantic_object, field, value)
+
+
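
# Sketch of temp_update (illustrative, reusing Person/Address from above):
# the overrides apply only inside the `with` block and are restored on exit.
p = Person(name="Ada", address=Address(city="London"))
with temp_update(p, {"name": "Grace"}):
    print(p.name)  # Grace
print(p.name)  # Ada
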
+T = TypeVar("T", bound=BaseModel)
+
+
+@contextmanager
+def temp_params(config: T, field: str, temp: T) -> Generator[None, None, None]:
+    """Context manager to temporarily override `field` in a `config`."""
+    original_vals = getattr(config, field)
+    try:
+        # Apply temporary settings
+        setattr(config, field, temp)
+        yield
+    finally:
+        # Revert to original settings
+        setattr(config, field, original_vals)
+
+
+def numpy_to_python_type(numpy_type: Type[Any]) -> Type[Any]:
+    """Converts a numpy data type to its Python equivalent."""
+    type_mapping = {
+        np.float64: float,
+        np.float32: float,
+        np.int64: int,
+        np.int32: int,
+        np.bool_: bool,
+        # Add other numpy types as necessary
+    }
+    return type_mapping.get(numpy_type, numpy_type)
+
+
+def dataframe_to_pydantic_model(df: pd.DataFrame) -> Type[BaseModel]:
+    """Make a Pydantic model from a dataframe."""
+    fields = {col: (type(df[col].iloc[0]), ...) for col in df.columns}
+    return create_model("DataFrameModel", __base__=BaseModel, **fields)  # type: ignore
+
+
+def dataframe_to_pydantic_objects(df: pd.DataFrame) -> List[BaseModel]:
+    """Make a list of Pydantic objects from a dataframe."""
+    Model = dataframe_to_pydantic_model(df)
+    return [Model(**row.to_dict()) for _, row in df.iterrows()]
+
+
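
# Sketch: turning a dataframe into Pydantic objects (illustrative); field
# types are inferred from the first row of each column:
df = pd.DataFrame({"name": ["Ada", "Grace"], "age": [36, 45]})
objs = dataframe_to_pydantic_objects(df)
# objs[0].name == "Ada", objs[0].age == 36
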
+def first_non_null(series: pd.Series) -> Any | None:
+    """Find the first non-null item in a pandas Series."""
+    for item in series:
+        if item is not None:
+            return item
+    return None
+
+
+def dataframe_to_document_model(
+    df: pd.DataFrame,
+    content: str = "content",
+    metadata: List[str] = [],
+    exclude: List[str] = [],
+) -> Type[BaseModel]:
+    """
+    Make a subclass of Document from a dataframe.
+
+    Args:
+        df (pd.DataFrame): The dataframe.
+        content (str): The name of the column containing the content,
+            which will map to the Document.content field.
+        metadata (List[str]): A list of column names containing metadata;
+            these will be included in the Document.metadata field.
+        exclude (List[str]): A list of column names to exclude from the model
+            (e.g. "vector", when lance is used to add an embedding vector to the df).
+
+    Returns:
+        Type[BaseModel]: A Pydantic model subclassing Document.
+    """
+
+    # Remove excluded columns
+    df = df.drop(columns=exclude, inplace=False)
+
+    if metadata:
+        # Define fields for the dynamic subclass of DocMetaData
+        metadata_fields = {
+            col: (
+                Optional[numpy_to_python_type(type(first_non_null(df[col])))],
+                None,
+            )
+            for col in metadata
+        }
+        DynamicMetaData = create_model(  # type: ignore
+            "DynamicMetaData", __base__=DocMetaData, **metadata_fields
+        )
+    else:
+        # Use the base DocMetaData class directly
+        DynamicMetaData = DocMetaData
+
+    # Define additional top-level fields for DynamicDocument
+    additional_fields = {
+        col: (
+            Optional[numpy_to_python_type(type(first_non_null(df[col])))],
+            None,
+        )
+        for col in df.columns
+        if col not in metadata and col != content
+    }
+
+    # Create a dynamic subclass of Document
+    DynamicDocumentFields = {
+        **{"metadata": (DynamicMetaData, ...)},
+        **additional_fields,
+    }
+    DynamicDocument = create_model(  # type: ignore
+        "DynamicDocument", __base__=Document, **DynamicDocumentFields
+    )
+
+    def from_df_row(
+        cls: type[BaseModel],
+        row: pd.Series,
+        content: str = "content",
+        metadata: List[str] = [],
+    ) -> BaseModel | None:
+        content_val = row[content] if (content and content in row) else ""
+        metadata_values = (
+            {col: row[col] for col in metadata if col in row} if metadata else {}
+        )
+        additional_values = {
+            col: row[col] for col in additional_fields if col in row and col != content
+        }
+        meta = DynamicMetaData(**metadata_values)
+        return cls(content=content_val, metadata=meta, **additional_values)
+
+    # Bind the method to the class
+    DynamicDocument.from_df_row = classmethod(from_df_row)
+
+    return DynamicDocument  # type: ignore
+
+
+def dataframe_to_documents(
+    df: pd.DataFrame,
+    content: str = "content",
+    metadata: List[str] = [],
+    doc_cls: Type[BaseModel] | None = None,
+) -> List[Document]:
+    """
+    Make a list of Document objects from a dataframe.
+
+    Args:
+        df (pd.DataFrame): The dataframe.
+        content (str): The name of the column containing the content,
+            which will map to the Document.content field.
+        metadata (List[str]): A list of column names containing metadata;
+            these will be included in the Document.metadata field.
+        doc_cls (Type[BaseModel], optional): A Pydantic model subclassing
+            Document. Defaults to None.
+
+    Returns:
+        List[Document]: The list of Document objects.
+    """
+    Model = doc_cls or dataframe_to_document_model(df, content, metadata)
+    docs = [
+        Model.from_df_row(row, content, metadata)  # type: ignore
+        for _, row in df.iterrows()
+    ]
+    return [m for m in docs if m is not None]
+
+
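
# Sketch of dataframe_to_documents (illustrative): the "text" column maps to
# Document.content, and "author" lands in Document.metadata:
df = pd.DataFrame({"text": ["hello", "world"], "author": ["ada", "grace"]})
docs = dataframe_to_documents(df, content="text", metadata=["author"])
# docs[0].content == "hello", docs[0].metadata.author == "ada"
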
+def extra_metadata(document: Document, doc_cls: Type[Document] = Document) -> List[str]:
+    """
+    Checks for extra fields in a document's metadata that are not defined in the
+    original metadata schema.
+
+    Args:
+        document (Document): The document instance to check for extra fields.
+        doc_cls (Type[Document]): The class type derived from Document, used
+            as a reference to identify extra fields in the document's metadata.
+
+    Returns:
+        List[str]: A list of strings representing the keys of the extra fields
+            found in the document's metadata.
+    """
+    # Convert metadata to dict, including extra fields
+    metadata_fields = set(document.metadata.dict().keys())
+
+    # Get the defined fields in the metadata of doc_cls
+    defined_fields = set(doc_cls.__fields__["metadata"].type_.__fields__.keys())
+
+    # Identify extra fields not in the defined fields
+    extra_fields = list(metadata_fields - defined_fields)
+
+    return extra_fields
+
+
+def extend_document_class(d: Document) -> Type[Document]:
+    """Generates a new Pydantic class based on a given document instance.
+
+    This function dynamically creates a new Pydantic class with additional
+    fields based on the "extra" metadata fields present in the given document
+    instance. The new class is a subclass of the original Document class, with
+    the original metadata fields retained and the extra fields added as normal
+    fields to the metadata.
+
+    Args:
+        d: An instance of the Document class.
+
+    Returns:
+        A new subclass of the Document class that includes the additional fields
+        found in the metadata of the given document instance.
+    """
+    # Extract the fields from the original metadata class, including types,
+    # correctly handling special types like List[str].
+    original_metadata_fields = {
+        k: (v.outer_type_ if v.shape != 1 else v.type_, ...)
+        for k, v in DocMetaData.__fields__.items()
+    }
+    # Extract extra fields from the metadata instance, with their types
+    extra_fields = {
+        k: (type(v), ...)
+        for k, v in d.metadata.__dict__.items()
+        if k not in DocMetaData.__fields__
+    }
+
+    # Combine original and extra fields for the new metadata class
+    combined_fields = {**original_metadata_fields, **extra_fields}
+
+    # Create a new metadata class with the combined fields
+    NewMetadataClass = create_model(  # type: ignore
+        "ExtendedDocMetadata", **combined_fields, __base__=DocMetaData
+    )
+    # NewMetadataClass.__config__.arbitrary_types_allowed = True
+
+    # Create a new document class using the new metadata class
+    NewDocumentClass = create_model(
+        "ExtendedDocument",
+        content=(str, ...),
+        metadata=(NewMetadataClass, ...),
+        __base__=Document,
+    )
+
+    return NewDocumentClass
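
# Sketch tying extra_metadata and extend_document_class together
# (illustrative, reusing `docs` from the dataframe_to_documents sketch):
# detect undeclared metadata fields on a document, then build a Document
# subclass that declares them.
doc = docs[0]
extra = extra_metadata(doc)  # e.g. ["author"]
ExtendedDoc = extend_document_class(doc)
# ExtendedDoc's metadata model now declares "author" as a regular field.
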