linkml-store 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.

linkml_store/api/client.py CHANGED
@@ -1,3 +1,4 @@
+import importlib
 import logging
 from pathlib import Path
 from typing import Dict, Optional, Union
@@ -7,23 +8,18 @@ from linkml_runtime import SchemaView
 
 from linkml_store.api import Database
 from linkml_store.api.config import ClientConfig
-from linkml_store.api.stores.chromadb.chromadb_database import ChromaDBDatabase
-from linkml_store.api.stores.duckdb.duckdb_database import DuckDBDatabase
-from linkml_store.api.stores.filesystem.filesystem_database import FileSystemDatabase
-from linkml_store.api.stores.mongodb.mongodb_database import MongoDBDatabase
-from linkml_store.api.stores.neo4j.neo4j_database import Neo4jDatabase
-from linkml_store.api.stores.solr.solr_database import SolrDatabase
 
 logger = logging.getLogger(__name__)
 
 
+
 HANDLE_MAP = {
-    "duckdb": DuckDBDatabase,
-    "solr": SolrDatabase,
-    "mongodb": MongoDBDatabase,
-    "chromadb": ChromaDBDatabase,
-    "neo4j": Neo4jDatabase,
-    "file": FileSystemDatabase,
+    "duckdb": "linkml_store.api.stores.duckdb.duckdb_database.DuckDBDatabase",
+    "solr": "linkml_store.api.stores.solr.solr_database.SolrDatabase",
+    "mongodb": "linkml_store.api.stores.mongodb.mongodb_database.MongoDBDatabase",
+    "chromadb": "linkml_store.api.stores.chromadb.chromadb_database.ChromaDBDatabase",
+    "neo4j": "linkml_store.api.stores.neo4j.neo4j_database.Neo4jDatabase",
+    "file": "linkml_store.api.stores.filesystem.filesystem_database.FileSystemDatabase",
 }
 
 
@@ -155,6 +151,9 @@ class Client:
         if auto_attach:
             db = self.attach_database(handle, alias=name, **kwargs)
             db.from_config(db_config)
+            if db_config.source:
+                db = self.get_database(name)
+                db.store(db_config.source.data)
 
     def _set_database_config(self, db: Database):
         """
@@ -207,7 +206,14 @@ class Client:
         scheme, _ = handle.split(":", 1)
         if scheme not in HANDLE_MAP:
             raise ValueError(f"Unknown scheme: {scheme}")
-        cls = HANDLE_MAP[scheme]
+        module_path, class_name = HANDLE_MAP[scheme].rsplit('.', 1)
+        try:
+            module = importlib.import_module(module_path)
+            cls = getattr(module, class_name)
+        except ImportError as e:
+            raise ImportError(f"Failed to import {scheme} database. Make sure the correct extras are installed: {e}")
+
+        #cls = HANDLE_MAP[scheme]
         db = cls(handle=handle, recreate_if_exists=recreate_if_exists, **kwargs)
         if schema_view:
             db.set_schema_view(schema_view)
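The two hunks above replace eager imports of every backend with dotted-path strings in HANDLE_MAP that are resolved on demand, so optional backends (mongodb, neo4j, chromadb, ...) are only imported when their scheme is actually used. A minimal standalone sketch of the pattern (load_class is an illustrative helper, not a package API):

import importlib

def load_class(dotted_path: str) -> type:
    """Resolve 'pkg.module.ClassName' to the class object, importing the module lazily."""
    module_path, class_name = dotted_path.rsplit(".", 1)
    # raises ImportError here if the backend's extra is not installed
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# e.g. load_class("linkml_store.api.stores.duckdb.duckdb_database.DuckDBDatabase")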
linkml_store/api/collection.py CHANGED
@@ -470,6 +470,7 @@ class Collection(Generic[DatabaseType]):
         where: Optional[Any] = None,
         index_name: Optional[str] = None,
         limit: Optional[int] = None,
+        select_cols: Optional[List[str]] = None,
         mmr_relevance_factor: Optional[float] = None,
         **kwargs,
     ) -> QueryResult:
@@ -503,6 +504,7 @@ class Collection(Generic[DatabaseType]):
         :param where:
         :param index_name:
         :param limit:
+        :param select_cols:
         :param kwargs:
         :return:
         """
@@ -538,6 +540,11 @@ class Collection(Generic[DatabaseType]):
         results = ix.search(query, vector_pairs, limit=limit, mmr_relevance_factor=mmr_relevance_factor, **kwargs)
         for r in results:
             del r[1][index_col]
+        if select_cols:
+            new_results = []
+            for r in results:
+                new_results.append((r[0], {k: v for k, v in r[1].items() if k in select_cols}))
+            results = new_results
         new_qr = QueryResult(num_rows=len(results))
         new_qr.ranked_rows = results
         new_qr.rows = [r[1] for r in results]
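With the new select_cols parameter, search() projects each ranked (score, row) pair down to the requested keys after the vector search runs. A self-contained sketch of that projection step (project_columns is illustrative, not a package API):

from typing import Any, Dict, List, Optional, Tuple

def project_columns(
    results: List[Tuple[float, Dict[str, Any]]],
    select_cols: Optional[List[str]] = None,
) -> List[Tuple[float, Dict[str, Any]]]:
    """Keep only the selected keys in each result row; pass through if no selection."""
    if not select_cols:
        return results
    return [(score, {k: v for k, v in row.items() if k in select_cols})
            for score, row in results]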
@@ -672,6 +679,7 @@ class Collection(Generic[DatabaseType]):
         """
         yield from self.find({}, limit=-1).rows
 
+    @property
     def rows(self) -> List[OBJECT]:
         """
         Return a list of objects in the collection.
linkml_store/api/config.py CHANGED
@@ -91,7 +91,7 @@ class CollectionConfig(ConfiguredBaseModel):
     )
     source: Optional[CollectionSource] = Field(
         default=None,
-        description="Metadata about the source",
+        description="Source for the collection",
     )
     derived_from: Optional[List[DerivationConfiguration]] = Field(
         default=None,
@@ -154,6 +154,10 @@ class DatabaseConfig(ConfiguredBaseModel):
         default=False,
         description="Whether to ensure referential integrity",
     )
+    source: Optional[CollectionSource] = Field(
+        default=None,
+        description="Source for the database",
+    )
 
 
 class ClientConfig(ConfiguredBaseModel):
linkml_store/api/stores/filesystem/filesystem_database.py CHANGED
@@ -3,7 +3,7 @@ from pathlib import Path
 from typing import Optional
 
 import yaml
-from linkml.utils.schema_builder import SchemaBuilder
+from linkml_runtime.utils.schema_builder import SchemaBuilder
 from linkml_runtime import SchemaView
 
 from linkml_store.api import Database
linkml_store/cli.py CHANGED
@@ -135,12 +135,17 @@ def cli(ctx, verbose: int, quiet: bool, stacktrace: bool, database, collection,
         logger.setLevel(logging.ERROR)
     ctx.ensure_object(dict)
     if input:
-        stem = underscore(Path(input).stem)
-        database = "duckdb"
-        collection = stem
+        database = "duckdb"  # default: store in duckdb
+        if input.startswith("http"):
+            parts = input.split("/")
+            collection = parts[-1]
+            collection = collection.split(".")[0]
+        else:
+            stem = underscore(Path(input).stem)
+            collection = stem
+        logger.info(f"Using input file: {input}, "
+                    f"default storage is {database} and collection is {collection}")
         config = ClientConfig(databases={"duckdb": {"collections": {stem: {"source": {"local_path": input}}}}})
-        # collection = Path(input).stem
-        # database = f"file:{Path(input).parent}"
     if config is None and DEFAULT_LOCAL_CONF_PATH.exists():
         config = DEFAULT_LOCAL_CONF_PATH
     if config is None and DEFAULT_GLOBAL_CONF_PATH.exists():
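The new default-naming logic above picks a collection name from the input: for URLs it takes the last path segment with the extension stripped, otherwise the underscored file stem. A standalone sketch of that heuristic (default_collection and the inlined stand-in for the underscore() helper are illustrative, not package APIs):

import re
from pathlib import Path

def default_collection(input: str) -> str:
    if input.startswith("http"):
        # URL: last path segment, extension stripped
        return input.split("/")[-1].split(".")[0]
    # local file: underscored stem (stand-in for the underscore() helper used in cli.py)
    return re.sub(r"\W+", "_", Path(input).stem)

assert default_collection("https://example.org/data/persons.csv") == "persons"
assert default_collection("tests/input/iris.csv") == "iris"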
@@ -178,10 +183,11 @@
 
 @cli.command()
 @click.argument("files", type=click.Path(exists=True), nargs=-1)
+@click.option("--replace/--no-replace", default=False, show_default=True, help="Replace existing objects")
 @click.option("--format", "-f", type=format_choice, help="Input format")
 @click.option("--object", "-i", multiple=True, help="Input object as YAML")
 @click.pass_context
-def insert(ctx, files, object, format):
+def insert(ctx, files, replace, object, format):
     """Insert objects from files (JSON, YAML, TSV) into the specified collection.
 
     Using a configuration:
@@ -195,7 +201,6 @@ def insert(ctx, files, object, format):
     collection = settings.collection
     if not collection:
         raise ValueError("Collection must be specified.")
-    objects = []
     if not files and not object:
         files = ["-"]
     for file_path in files:
@@ -204,13 +209,19 @@ def insert(ctx, files, object, format):
         else:
            objects = load_objects(file_path)
         logger.info(f"Inserting {len(objects)} objects from {file_path} into collection '{collection.alias}'.")
-        collection.insert(objects)
+        if replace:
+            collection.replace(objects)
+        else:
+            collection.insert(objects)
         click.echo(f"Inserted {len(objects)} objects from {file_path} into collection '{collection.alias}'.")
     if object:
         for object_str in object:
             logger.info(f"Parsing: {object_str}")
             objects = yaml.safe_load(object_str)
-            collection.insert(objects)
+            if replace:
+                collection.replace(objects)
+            else:
+                collection.insert(objects)
             click.echo(f"Inserted {len(objects)} objects from {object_str} into collection '{collection.alias}'.")
     collection.commit()
 
@@ -534,10 +545,12 @@ def pivot(ctx, where, limit, index, columns, values, output_type, output):
 @click.option("--evaluation-count", "-n", type=click.INT, help="Number of examples to evaluate over")
 @click.option("--evaluation-match-function", help="Name of function to use for matching objects in eval")
 @click.option("--query", "-q", type=click.STRING, help="query term")
+@click.option("--where", "-w", type=click.STRING, help="query term")
 @click.pass_context
 def infer(
     ctx,
     inference_config_file,
+    where,
     query,
     evaluation_count,
     evaluation_match_function,
@@ -579,6 +592,7 @@ def infer(
         linkml-store -i tests/input/iris.csv inference -t sklearn \
             -q '{"sepal_length": 5.1, "sepal_width": 3.5, "petal_length": 1.4, "petal_width": 0.2}'
     """
+    where_clause = yaml.safe_load(where) if where else None
     if query:
         query_obj = yaml.safe_load(query)
     else:
@@ -681,6 +695,7 @@ def schema(ctx, output_type, output):
 @cli.command()
 @click.argument("search_term")
 @click.option("--where", "-w", type=click.STRING, help="WHERE clause for the search")
+@click.option("--select", "-s", type=click.STRING, help="SELECT clause for the query, as YAML")
 @click.option("--limit", "-l", type=click.INT, help="Maximum number of search results")
 @click.option("--output-type", "-O", type=format_choice, default="json", help="Output format")
 @click.option("--output", "-o", type=click.Path(), help="Output file path")
@@ -689,13 +704,14 @@ def schema(ctx, output_type, output):
 )
 @index_type_option
 @click.pass_context
-def search(ctx, search_term, where, limit, index_type, output_type, output, auto_index):
+def search(ctx, search_term, where, select, limit, index_type, output_type, output, auto_index):
     """Search objects in the specified collection."""
     collection = ctx.obj["settings"].collection
     ix = get_indexer(index_type)
     logger.info(f"Attaching index to collection {collection.alias}: {ix.model_dump()}")
     collection.attach_indexer(ix, auto_index=auto_index)
-    result = collection.search(search_term, where=where, limit=limit)
+    select_cols = yaml.safe_load(select) if select else None
+    result = collection.search(search_term, where=where, select_cols=select_cols, limit=limit)
     output_data = render_output([{"score": row[0], **row[1]} for row in result.ranked_rows], output_type)
     if output:
         with open(output, "w") as f:
linkml_store/index/implementations/llm_indexer.py CHANGED
@@ -3,7 +3,6 @@ from pathlib import Path
 from typing import TYPE_CHECKING, List, Optional
 
 import numpy as np
-from tiktoken import encoding_for_model
 
 from linkml_store.api.config import CollectionConfig
 from linkml_store.index.indexer import INDEX_ITEM, Indexer
@@ -55,7 +54,7 @@ class LLMIndexer(Indexer):
 
     def texts_to_vectors(self, texts: List[str], cache: bool = None, **kwargs) -> List[INDEX_ITEM]:
         """
-        Use LLM to embed
+        Use LLM to embed.
 
         >>> indexer = LLMIndexer(cached_embeddings_database="tests/input/llm_cache.db")
         >>> vectors = indexer.texts_to_vectors(["hello", "goodbye"])
@@ -63,20 +62,24 @@ class LLMIndexer(Indexer):
         :param texts:
         :return:
         """
+        from tiktoken import encoding_for_model
         logging.info(f"Converting {len(texts)} texts to vectors")
         model = self.embedding_model
-        token_limit = get_token_limit(model.model_id)
+        # TODO: make this more accurate
+        token_limit = get_token_limit(model.model_id) - 200
         encoding = encoding_for_model("gpt-4o")
 
         def truncate_text(text: str) -> str:
             # split into tokens every 1000 chars:
             parts = [text[i : i + 1000] for i in range(0, len(text), 1000)]
-            return render_formatted_text(
+            truncated = render_formatted_text(
                 lambda x: "".join(x),
                 parts,
                 encoding,
                 token_limit,
             )
+            logger.debug(f"Truncated text from {len(text)} to {len(truncated)}")
+            return truncated
 
         texts = [truncate_text(text) for text in texts]
 
linkml_store/inference/inference_engine.py CHANGED
@@ -4,7 +4,7 @@ from abc import ABC
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
-from typing import Optional, TextIO, Tuple, Union
+from typing import Optional, TextIO, Tuple, Union, Any
 
 import pandas as pd
 from pydantic import BaseModel, ConfigDict
@@ -67,13 +67,14 @@ class CollectionSlice(BaseModel):
     # slice: Tuple[Optional[int], Optional[int]] = Field(default=(None, None))
     indices: Optional[Tuple[int, ...]] = None
     _collection: Optional[Collection] = None
+    where: Any = None
 
     @property
     def collection(self) -> Collection:
         if not self._collection and not self.indices:
             return self.base_collection
         if not self._collection:
-            rows = self.base_collection.find({}, limit=-1).rows
+            rows = self.base_collection.rows
             subset = [rows[i] for i in self.indices]
             db = self.base_collection.parent
             subset_name = self.slice_alias
@@ -94,6 +95,7 @@ class CollectionSlice(BaseModel):
         """
         Return the slice of the collection as a dataframe.
 
+        :param flattened: flattned nested objects to give keys like foo.bar
         :return:
         """
         rs = self.collection.find({}, limit=-1)
linkml_store/utils/format_utils.py CHANGED
@@ -12,9 +12,9 @@ from typing import IO, Any, Dict, List, Optional, TextIO, Type, Union
 
 import pandas as pd
 import pystow
+import xmltodict
 import yaml
 from pydantic import BaseModel
-from tabulate import tabulate
 
 logger = logging.getLogger(__name__)
 
@@ -30,6 +30,7 @@ class Format(Enum):
     YAMLL = "yamll"
     TSV = "tsv"
     CSV = "csv"
+    XML = "xml"
     PYTHON = "python"
     PARQUET = "parquet"
     FORMATTED = "formatted"
@@ -50,6 +51,7 @@ class Format(Enum):
             ".yamll": cls.YAMLL,
             ".tsv": cls.TSV,
             ".csv": cls.CSV,
+            ".xml": cls.XML,
             ".py": cls.PYTHON,
             ".parquet": cls.PARQUET,
             ".pq": cls.PARQUET,
@@ -124,6 +126,8 @@ def process_file(
         delimiter = "\t" if format == Format.TSV else ","
         reader = csv.DictReader(f, delimiter=delimiter)
         objs = list(reader)
+    elif format == Format.XML:
+        objs = xmltodict.parse(f.read())
     elif format == Format.PARQUET:
         import pyarrow.parquet as pq
 
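The new Format.XML branch delegates to xmltodict, which maps an XML document onto nested dicts and lists (repeated elements become lists). A minimal illustration of what that call returns:

import xmltodict

doc = xmltodict.parse("<persons><person><name>Ann</name></person></persons>")
# -> {'persons': {'person': {'name': 'Ann'}}}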
@@ -284,6 +288,7 @@ def render_output(
     elif format == Format.PYTHON:
         return str(data)
     elif format == Format.TABLE:
+        from tabulate import tabulate
         return tabulate(pd.DataFrame(data), headers="keys", tablefmt="psql")
     elif format == Format.YAML:
         if isinstance(data, list):
linkml_store/utils/llm_utils.py CHANGED
@@ -1,6 +1,10 @@
-from typing import Callable, List, Optional
+import logging
+from typing import Callable, List, Optional, TYPE_CHECKING
 
-from tiktoken import Encoding
+if TYPE_CHECKING:
+    import tiktoken
+
+logger = logging.getLogger(__name__)
 
 MODEL_TOKEN_MAPPING = {
     "gpt-4o-mini": 128_000,
@@ -40,7 +44,7 @@ MODEL_TOKEN_MAPPING = {
 def render_formatted_text(
     render_func: Callable,
     values: List[str],
-    encoding: Encoding,
+    encoding: "tiktoken.Encoding",
     token_limit: int,
     additional_text: Optional[str] = None,
 ) -> str:
@@ -67,6 +71,7 @@ def render_formatted_text(
     if additional_text:
         token_limit -= len(encoding.encode(additional_text))
     text_length = len(encoding.encode(text))
+    logger.debug(f"Encoding length: {text_length} (original: {len(text)})")
     if text_length <= token_limit:
         return text
     if not values:
linkml_store-0.2.2.dist-info/METADATA → linkml_store-0.2.4.dist-info/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: linkml-store
-Version: 0.2.2
+Version: 0.2.4
 Summary: linkml-store
 License: MIT
 Author: Author 1
@@ -12,9 +12,11 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Provides-Extra: all
 Provides-Extra: analytics
 Provides-Extra: app
-Provides-Extra: chromadb
+Provides-Extra: bigquery
 Provides-Extra: fastapi
 Provides-Extra: frictionless
 Provides-Extra: h5py
@@ -29,25 +31,26 @@ Provides-Extra: scipy
 Provides-Extra: tests
 Provides-Extra: validation
 Requires-Dist: black (>=24.0.0) ; extra == "tests"
-Requires-Dist: chromadb ; extra == "chromadb"
 Requires-Dist: click
 Requires-Dist: duckdb (>=0.10.1)
 Requires-Dist: duckdb-engine (>=0.11.2)
 Requires-Dist: fastapi ; extra == "fastapi"
 Requires-Dist: frictionless ; extra == "frictionless"
 Requires-Dist: gcsfs ; extra == "ibis"
+Requires-Dist: google-cloud-bigquery ; extra == "bigquery"
 Requires-Dist: h5py ; extra == "h5py"
 Requires-Dist: ibis-framework[duckdb,examples] (>=9.3.0) ; extra == "ibis"
 Requires-Dist: jinja2 (>=3.1.4,<4.0.0)
 Requires-Dist: jsonlines (>=4.0.0,<5.0.0)
+Requires-Dist: jsonpatch (>=1.33,<2.0)
 Requires-Dist: linkml (>=1.8.0) ; extra == "validation"
 Requires-Dist: linkml-runtime (>=1.8.0)
 Requires-Dist: linkml_map ; extra == "map"
 Requires-Dist: linkml_renderer ; extra == "renderer"
-Requires-Dist: llm ; extra == "llm"
+Requires-Dist: llm ; extra == "llm" or extra == "all"
 Requires-Dist: matplotlib ; extra == "analytics"
 Requires-Dist: multipledispatch ; extra == "ibis"
-Requires-Dist: neo4j ; extra == "neo4j"
+Requires-Dist: neo4j ; extra == "neo4j" or extra == "all"
 Requires-Dist: networkx ; extra == "neo4j"
 Requires-Dist: pandas (>=2.2.1) ; extra == "analytics"
 Requires-Dist: plotly ; extra == "analytics"
@@ -62,8 +65,10 @@ Requires-Dist: scipy ; extra == "scipy"
 Requires-Dist: seaborn ; extra == "analytics"
 Requires-Dist: sqlalchemy
 Requires-Dist: streamlit (>=1.32.2,<2.0.0) ; extra == "app"
+Requires-Dist: tabulate
 Requires-Dist: tiktoken ; extra == "llm"
 Requires-Dist: uvicorn ; extra == "fastapi"
+Requires-Dist: xmltodict (>=0.13.0,<0.14.0)
 Description-Content-Type: text/markdown
 
 # linkml-store
linkml_store-0.2.2.dist-info/RECORD → linkml_store-0.2.4.dist-info/RECORD
@@ -1,8 +1,8 @@
 linkml_store/__init__.py,sha256=jlU6WOUAn8cKIhzbTULmBTWpW9gZdEt7q_RI6KZN1bY,118
 linkml_store/api/__init__.py,sha256=3CelcFEFz0y3MkQAzhQ9JxHIt1zFk6nYZxSmYTo8YZE,226
-linkml_store/api/client.py,sha256=3klBXenQVbLjNQF3WmYfjASt3zvKOfWaCNp5aJM81Ec,12034
-linkml_store/api/collection.py,sha256=YVmfqdZaWfLAw3yzho-GEknsAiV1h5Z3O6csB_8CTY0,39407
-linkml_store/api/config.py,sha256=71pxQ5jM-ETxJWU7CzmKjsH6IEJUMP5sml381u9TYVk,5654
+linkml_store/api/client.py,sha256=wFVgl1NUovaKLqNVUEt9dsnoIzjzqFvktJVncAupdE4,12362
+linkml_store/api/collection.py,sha256=CGvWxH7HRhjDt9Cp3SGdMqyhYx7Q4fRKUtAJV74_l0g,39711
+linkml_store/api/config.py,sha256=pOz210JIwkEEXtfjcsZBp1UEedkBu8RkH62Qa1b4exI,5777
 linkml_store/api/database.py,sha256=nvae8jnOZsQIFCsl_lRBnKcvrpJg4A10ujIKGeMyUS8,29350
 linkml_store/api/queries.py,sha256=tx9fgGY5fC_2ZbIvg4BqTK_MXJwA_DI4mxr8HdQ6Vos,2075
 linkml_store/api/stores/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -15,7 +15,7 @@ linkml_store/api/stores/duckdb/duckdb_database.py,sha256=GH9bcOfHpNp6r-Eu1C3W0xu
 linkml_store/api/stores/duckdb/mappings.py,sha256=tDce3W1Apwammhf4LS6cRJ0m4NiJ0eB7vOI_4U5ETY8,148
 linkml_store/api/stores/filesystem/__init__.py,sha256=KjvCjdttwqMHNeGyL-gr59zRz0--HFEWWUNNCJ5hITs,347
 linkml_store/api/stores/filesystem/filesystem_collection.py,sha256=9gqY2KRZsn_RWk4eKkxFd3_wcxs5YaXvcBI7GGJBMGE,6751
-linkml_store/api/stores/filesystem/filesystem_database.py,sha256=sV-lueyrh3R6edyWkwN6qKa7yjPc8PIcF1rxgox6oA4,2875
+linkml_store/api/stores/filesystem/filesystem_database.py,sha256=e9hSGoaOxr_sG_RhjgzV_yvdQ_xbHHXHJDtufWzAX4E,2883
 linkml_store/api/stores/hdf5/__init__.py,sha256=l4cIh3v7P0nPbwGIsfuCMD_serQ8q8c7iuUA9W2Jb4o,97
 linkml_store/api/stores/hdf5/hdf5_collection.py,sha256=mnpLMYehn3PuaIjp2dXrIWu8jh-bdQ84X2Ku83jMdEY,3805
 linkml_store/api/stores/hdf5/hdf5_database.py,sha256=EZbjrpaqiNDEFvoD5dZNcGBXA8z6HRNL81emueTZWNw,2714
@@ -30,13 +30,13 @@ linkml_store/api/stores/solr/solr_collection.py,sha256=ZlxC3JbVaHfSA4HuTeJTsp6qe
 linkml_store/api/stores/solr/solr_database.py,sha256=TFjqbY7jAkdrhAchbNg0E-mChSP7ogNwFExslbvX7Yo,2877
 linkml_store/api/stores/solr/solr_utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 linkml_store/api/types.py,sha256=3aIQtDFMvsSmjuN5qrR2vNK5sHa6yzD_rEOPA6tHwvg,176
-linkml_store/cli.py,sha256=wl8BhnPcSU6Lt-jsvN1o6086PpUAfu43n5GI6w9SGxw,29384
+linkml_store/cli.py,sha256=bWbWQita8KCBjzovBRzQqHtjbRrf7Ttxq0Fe8zrDuds,30235
 linkml_store/constants.py,sha256=x4ZmDsfE9rZcL5WpA93uTKrRWzCD6GodYXviVzIvR38,112
 linkml_store/graphs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 linkml_store/graphs/graph_map.py,sha256=bYRxv8n1YPnFqE9d6JKNmRawb8EAhsPlHhBue0gvtZE,712
 linkml_store/index/__init__.py,sha256=6SQzDe-WZSSqbGNsbCDfyPTyz0s9ISDKw1dm9xgQuT4,1396
 linkml_store/index/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-linkml_store/index/implementations/llm_indexer.py,sha256=y1xvfUm_rl4UEiWJbsUsEnTCma98XRB9C1XOnuaAv5o,5474
+linkml_store/index/implementations/llm_indexer.py,sha256=ja7UXhQj7F0g6HiRIJ8EBPuM86nOgr49jkh7eh_nCHs,5644
 linkml_store/index/implementations/simple_indexer.py,sha256=KnkFJtXTHnwjhD_D6ZK2rFhBID1dgCedcOVPEWAY2NU,1282
 linkml_store/index/indexer.py,sha256=e5dsjh2wjOTDRsfClKJAFTbcK1UC7BOGkUCOfDg9omI,7635
 linkml_store/inference/__init__.py,sha256=b8NAFNZjOYU_8gOvxdyCyoiHOOl5Ai2ckKs1tv7ZkkY,342
@@ -46,14 +46,14 @@ linkml_store/inference/implementations/rag_inference_engine.py,sha256=mN7YQI-BeZ
 linkml_store/inference/implementations/rule_based_inference_engine.py,sha256=0IEY_fsHJPJy6QKbYQU_qE87RRnPOXQxPuJKXCQG8jU,6250
 linkml_store/inference/implementations/sklearn_inference_engine.py,sha256=Sdi7CoRK3qoLJu3prgLy1Ck_zQ1gHWRKFybHe7XQ4_g,13192
 linkml_store/inference/inference_config.py,sha256=EFGdigxWsfTPREbgqyJVRShN0JktCEmFLLoECrLfXSg,2282
-linkml_store/inference/inference_engine.py,sha256=l2UB6cA0rW7a9qyiv8JF5Nzj8nRHGX_yqMYbiDnY1Qc,7055
+linkml_store/inference/inference_engine.py,sha256=IxQIOgmXCDI8ilCGtoaVA_1wFROUg4uH1_yGbX78N2U,7139
 linkml_store/inference/inference_engine_registry.py,sha256=6o66gvBYBwdeAKm62zqqvfaBlcopVP_cla3L6uXGsHA,3015
 linkml_store/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 linkml_store/utils/change_utils.py,sha256=O2rvSvgTKB60reLLz9mX5OWykAA_m93bwnUh5ZWa0EY,471
 linkml_store/utils/file_utils.py,sha256=rQ7-XpmI6_Kx_dhEnI98muFRr0MmgI_kZ_9cgJBf_0I,1411
-linkml_store/utils/format_utils.py,sha256=airJ2_tFsr0dTIbSHT5y0TZbDrvBBV4_qThFPFY5k8U,10925
+linkml_store/utils/format_utils.py,sha256=sjpdJJ8Ww2ilm03mQt_v4QkZvQMymqUeTiPS3U1ViKM,11067
 linkml_store/utils/io.py,sha256=JHUrWDtlZC2jtN_PQZ4ypdGIyYlftZEN3JaCvEPs44w,884
-linkml_store/utils/llm_utils.py,sha256=3jRFUtEywoKdomKb3aCH1GdI9hQJOQo8Udb3Jy4M-Xw,2885
+linkml_store/utils/llm_utils.py,sha256=0lvR_lBSDSuP-0Eum16QBUsSv8sWfDjZPz_MnDSPvn0,3048
 linkml_store/utils/mongodb_utils.py,sha256=Rl1YmMKs1IXwSsJIViSDChbi0Oer5cBnMmjka2TeQS8,4665
 linkml_store/utils/neo4j_utils.py,sha256=y3KPmDZ8mQmePgg0lUeKkeKqzEr2rV226xxEtHc5pRg,1266
 linkml_store/utils/object_utils.py,sha256=Vib-5Ip2DlRVKLZpU-008ZZI813-vfKVSCY0TksRenM,6293
@@ -73,8 +73,8 @@ linkml_store/webapi/html/database_details.html.j2,sha256=qtXdavbZb0mohiObI9dvJtk
 linkml_store/webapi/html/databases.html.j2,sha256=a9BCWQYfPeFhdUd31CWhB0yWhTIFXQayO08JgjyqKoc,294
 linkml_store/webapi/html/generic.html.j2,sha256=KtLaO2HUEF2Opq-OwHKgRKetNWe8IWc6JuIkxRPsywk,1018
 linkml_store/webapi/main.py,sha256=B0Da575kKR7X88N9ykm99Dem8FyBAW9f-w3A_JwUzfw,29165
-linkml_store-0.2.2.dist-info/LICENSE,sha256=77mDOslUnalYnuq9xQYZKtIoNEzcH9mIjvWHOKjamnE,1086
-linkml_store-0.2.2.dist-info/METADATA,sha256=_zde_tfX6AAw1ZvM1LnYOmzkQbiz6f3rQhVyBKODdnE,6977
-linkml_store-0.2.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-linkml_store-0.2.2.dist-info/entry_points.txt,sha256=gWxVsHqx-t-UKWFHFzawQTvs4is4vC1rCF5AeKyqWWk,101
-linkml_store-0.2.2.dist-info/RECORD,,
+linkml_store-0.2.4.dist-info/LICENSE,sha256=77mDOslUnalYnuq9xQYZKtIoNEzcH9mIjvWHOKjamnE,1086
+linkml_store-0.2.4.dist-info/METADATA,sha256=PJX-_TSPk6WDXDCmvuFDUb5649ECQc2N6zP4pWqhBvU,7204
+linkml_store-0.2.4.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+linkml_store-0.2.4.dist-info/entry_points.txt,sha256=gWxVsHqx-t-UKWFHFzawQTvs4is4vC1rCF5AeKyqWWk,101
+linkml_store-0.2.4.dist-info/RECORD,,
linkml_store-0.2.2.dist-info/WHEEL → linkml_store-0.2.4.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.9.0
+Generator: poetry-core 2.0.1
 Root-Is-Purelib: true
 Tag: py3-none-any