qlever-0.5.0-py3-none-any.whl → qlever-0.5.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of qlever has been flagged as possibly problematic.

qlever/Qleverfiles/Qleverfile.dblp CHANGED
@@ -1,28 +1,24 @@
  # Qleverfile for DBLP, use with https://github.com/ad-freiburg/qlever-control
  #
- # qlever get-data # downloads .gz file of size ~3 GB (as of 31.07.2022)
- # qlever index # takes ~30 minutes and ~20 GB RAM (on an AMD Ryzen 9 5900X)
- # qlever start # starts the server (takes around 2 minutes)
- #
- # Also builds a text index for fast kewyword search in literals. Without that
- # (WITH_TEXT_INDEX = false), the index build takes only ~10 minutes.
+ # qlever get-data # takes ~3 mins (downloads .ttl.gz file of size ~3 GB)
+ # qlever index # takes ~3 mins (on an AMD Ryzen 9 5900X)
+ # qlever start # takes a few seconds
 
  [data]
- NAME = dblp
- GET_DATA_URL = https://dblp.org/rdf/${index:INPUT_FILES}
- GET_DATA_CMD = curl -LO -C - ${GET_DATA_URL}
- DESCRIPTION = DBLP computer science bibliography, data from ${GET_DATA_URL}
- TEXT_DESCRIPTION = All literals, search with FILTER KEYWORDS(?text, "...")
+ NAME = dblp
+ GET_DATA_URL = https://dblp.org/rdf/${index:INPUT_FILES}
+ GET_DATA_CMD = curl -LRC - O ${GET_DATA_URL} 2>&1 | tee ${data:NAME}.download-log.txt
+ VERSION = $$(date -r dblp.ttl.gz +"%d.%m.%Y %H:%M" || echo "NO_DATE")
+ DESCRIPTION = DBLP computer science bibliography, data from ${GET_DATA_URL} (version ${VERSION})
 
  [index]
  INPUT_FILES = dblp.ttl.gz
  CAT_INPUT_FILES = zcat ${INPUT_FILES}
- SETTINGS_JSON = { "ascii-prefixes-only": false, "num-triples-per-batch": 1000000 }
- TEXT_INDEX = from_literals
+ SETTINGS_JSON = { "ascii-prefixes-only": false, "num-triples-per-batch": 1000000, "prefixes-external": [""] }
 
  [server]
  PORT = 7015
- ACCESS_TOKEN = ${data:NAME}_7643543846
+ ACCESS_TOKEN = ${data:NAME}
  MEMORY_FOR_QUERIES = 30G
  CACHE_MAX_SIZE = 5G
 
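A note on the syntax above: the ${section:key} references and the $$ escape in the new VERSION entry match Python's configparser with ExtendedInterpolation, which fits how the qlever script reads Qleverfiles. A minimal sketch of that behavior, with an abbreviated Qleverfile snippet:

    # Minimal sketch: how ${section:key} and $$ behave under
    # configparser.ExtendedInterpolation (assumed here to be how
    # Qleverfiles are parsed).
    from configparser import ConfigParser, ExtendedInterpolation

    config = ConfigParser(interpolation=ExtendedInterpolation())
    config.read_string("""
    [data]
    NAME = dblp
    GET_DATA_URL = https://dblp.org/rdf/${index:INPUT_FILES}
    VERSION = $$(date -r dblp.ttl.gz +"%d.%m.%Y %H:%M" || echo "NO_DATE")

    [index]
    INPUT_FILES = dblp.ttl.gz
    """)

    # ${index:INPUT_FILES} resolves across sections ...
    print(config["data"]["GET_DATA_URL"])  # https://dblp.org/rdf/dblp.ttl.gz
    # ... and $$ collapses to a literal $, so the $(date ...) substitution
    # survives intact for the shell that later executes the value.
    print(config["data"]["VERSION"])

The $$ escape is what lets VERSION defer the date command to the shell instead of having the parser try to interpolate it.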
qlever/Qleverfiles/Qleverfile.dnb CHANGED
@@ -1,8 +1,12 @@
  # Qleverfile for Olympics, use with https://github.com/ad-freiburg/qlever-control
  #
- # qlever get-data # takes ~ 10 min to download .nt.gz file of size ~ 8 GB
- # qlever index # takes ~ 20 min and ~ 5 GB RAM (on an AMD Ryzen 9 5900X)
- # qlever start # starts the server
+ # qlever get-data # takes ~ 10 mins to download .nt.gz file of size ~ 8 GB
+ # qlever index # takes ~ 5 min and ~ 5 GB RAM (on an AMD Ryzen 9 5900X)
+ # qlever start # starts the server (instantaneous)
+ #
+ # IMPORTANT: The current files contain invalid floating point literals. To make
+ # QLever ignore them, compile QLever with `invalidLiteralsAreSkipped_ = true`
+ # in `src/parser/TurtleParserBase.h:55`.
  #
  # NOTE: https://data.dnb.de/opendata/ is rather confusing becase of the many
  # files. This Qleverfile downloads all the datasets named "Gesamtabzug", except
@@ -13,24 +17,23 @@
  [data]
  NAME = dnb
  BASE_URL = https://data.dnb.de/opendata
- GET_DATA_CMD = curl -L -C - --remote-name-all ${BASE_URL}/authorities-gnd_lds.nt.gz ${BASE_URL}/dnb-all_lds.nt.gz ${BASE_URL}/dnb-all_ldsprov.nt.gz ${BASE_URL}/zdb_lds.nt.gz
- DESCRIPTION = DNB data from ${BASE_ULR} (authoritities-gnd_lds, dnb_all_lds, dnb-all_ldsprov, zdb_lds)
- TEXT_DESCRIPTION = All literals, search with FILTER KEYWORDS(?var, "...")
+ GET_DATA_CMD = curl -L -C - --remote-name-all ${BASE_URL}/authorities-gnd_lds.nt.gz ${BASE_URL}/dnb-all_lds.nt.gz ${BASE_URL}/dnb-all_ldsprov.nt.gz ${BASE_URL}/zdb_lds.nt.gz 2>&1 | tee ${data:NAME}.getdata-log.txt
+ VERSION = $$(date -r dnb-all_lds.nt.gz +%d.%m.%Y || echo "NO_DATE")
+ DESCRIPTION = DNB data from ${BASE_URL} (authoritities-gnd_lds, dnb_all_lds, dnb-all_ldsprov, zdb_lds), version ${VERSION}
 
  [index]
  INPUT_FILES = *.nt.gz
- CAT_INPUT_FILES = zcat ${FILE_NAMES}
+ CAT_INPUT_FILES = zcat ${INPUT_FILES}
  SETTINGS_JSON = { "ascii-prefixes-only": true, "num-triples-per-batch": 1000000 }
- TEXT_INDEX = from_literals
 
  [server]
  PORT = 7035
- ACCESS_TOKEN = ${data:NAME}_284732743
- MEMORY_FOR_QUERIES = 10G
+ ACCESS_TOKEN = ${data:NAME}
+ MEMORY_FOR_QUERIES = 5G
  CACHE_MAX_SIZE = 2G
 
  [runtime]
- SYSTEM = true
+ SYSTEM = docker
  IMAGE = docker.io/adfreiburg/qlever:latest
 
  [ui]
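The new VERSION entry stamps the dataset with the modification time of the downloaded file (date -r). A minimal Python equivalent of that shell idiom, using the file name from the Qleverfile:

    # Minimal sketch: derive a version string from a file's modification
    # time, mirroring VERSION = $$(date -r dnb-all_lds.nt.gz +%d.%m.%Y
    # || echo "NO_DATE").
    from datetime import datetime
    from pathlib import Path

    def dataset_version(path: str) -> str:
        try:
            mtime = Path(path).stat().st_mtime
            return datetime.fromtimestamp(mtime).strftime("%d.%m.%Y")
        except OSError:
            return "NO_DATE"  # same fallback as the || echo "NO_DATE" branch

    print(dataset_version("dnb-all_lds.nt.gz"))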
qlever/Qleverfiles/Qleverfile.ohm-planet CHANGED
@@ -2,18 +2,18 @@
  #
  # qlever get-data # ~20 mins (download PBF, convert to TTL, add GeoSPARQL triples)
  # qlever index # ~10 mins and ~5 GB RAM (on an AMD Ryzen 9 5900X)
- # qlever start # start the server (instantaneous)
+ # qlever start # ~1 sec
  #
  # For `qlever get-data` to work, `osm2rdf` and `spatialjoin` must be installed
  # and included in the `PATH`.
 
  [data]
  NAME = ohm-planet
- GET_DATA_URL = $$(curl -s https://planet.openhistoricalmap.org/planet/state.txt)
- GET_DATA_CMD_1 = curl -LRfC - -o ${NAME}.pbf ${GET_DATA_URL}
+ GET_DATA_URL = https://planet.openhistoricalmap.org/planet
+ GET_DATA_CMD_1 = curl -LRfC - -o ${NAME}.pbf $$(curl -s ${GET_DATA_URL}/state.txt) 2>&1 | tee ${NAME}.download-log.txt
  GET_DATA_CMD_2 = osm2rdf ${NAME}.pbf -o ${NAME}.ttl --source-dataset OHM --simplify-wkt 0 --write-ogc-geo-triples none 2>&1 | tee ${NAME}.osm2rdf-log.txt
- GET_DATA_CMD_3 = bzcat ${NAME}.ttl.bz2 | \grep "^osm2rdf" | sed -En 's/^osm2rdf(geom)?:(ohm_)?(node|rel|way)[a-z]*_([0-9]+) geo:asWKT "([^\"]+)".*/ohm\3:\4\t\5/p' | tee ${NAME}.spatialjoin-input.tsv | spatialjoin --contains ' ogc:sfContains ' --intersects ' ogc:sfIntersects ' --suffix $$' .\n' -o ${NAME}.spatialjoin-triples.ttl.bz2 2>&1 | tee ${NAME}.spatialjoin-log.txt && rm -f areas events lines points simplelines
- GET_DATA_CMD = ${GET_DATA_CMD_1} && echo && ${GET_DATA_CMD_2} && echo && ${GET_DATA_CMD_3} && bzcat ${NAME}.ttl.bz2 | head -100 | \grep "^@prefix" > ${NAME}.prefix-definitions
+ GET_DATA_CMD_3 = bzcat ${NAME}.ttl.bz2 | \grep "^osm2rdf" | sed -En 's/^osm2rdf(geom)?:(ohm_)?(node|rel|way)[a-z]*_([0-9]+) geo:asWKT "([^\"]+)".*/ohm\3:\4\t\5/p' | tee ${NAME}.spatialjoin-input.tsv | spatialjoin --contains " ogc:sfContains " --covers " ogc:sfCovers " --intersects " ogc:sfIntersects " --equals " ogc:sfEquals " --touches " ogc:sfTouches " --crosses " ogc:sfCrosses " --overlaps " ogc:sfOverlaps " --suffix $$' .\n' -o ${NAME}.spatialjoin-triples.ttl.bz2 2>&1 | tee ${NAME}.spatialjoin-log.txt && rm -f areas events lines points simplelines
+ GET_DATA_CMD = set -o pipefail && ${GET_DATA_CMD_1} && echo && ${GET_DATA_CMD_2} && echo && ${GET_DATA_CMD_3} && head -100 <(bzcat ${NAME}.ttl.bz2) | sed '/^@prefix/!d' > ${NAME}.prefix-definitions
  VERSION = $$(date -r ${NAME}.pbf +%d.%m.%Y || echo "NO_DATE")
  DESCRIPTION = OHM Planet, data from ${GET_DATA_URL} version ${VERSION} (with GeoSPARQL predicates ogc:sfContains and ogc:sfIntersects)
 
@@ -24,7 +24,7 @@ SETTINGS_JSON = { "prefixes-external": [""], "ascii-prefixes-only": false, "p
 
  [server]
  PORT = 7037
- ACCESS_TOKEN = ${data:NAME}_32673264324
+ ACCESS_TOKEN = ${data:NAME}
  MEMORY_FOR_QUERIES = 10G
  CACHE_MAX_SIZE = 5G
  WARMUP_CMD = curl -s http://localhost:${PORT} -H "Accept: application/qlever-results+json" --data-urlencode "query=PREFIX geo: <http://www.opengis.net/ont/geosparql#> SELECT ?subject ?geometry WHERE { ?subject geo:hasGeometry ?m . ?m geo:asWKT ?geometry } INTERNAL SORT BY ?subject" --data-urlencode "access-token=${server:ACCESS_TOKEN}" --data-urlencode "pinresult=true" --data-urlencode "send=0" | jq .resultsize | xargs printf "Result size: %'d\n"
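GET_DATA_CMD_3 extracts geometry triples from the osm2rdf output with sed before feeding them to spatialjoin. A minimal Python sketch of the same extraction, reusing the regex from the diff (the sample input line is made up for illustration):

    # Minimal sketch: the sed extraction from GET_DATA_CMD_3 in Python.
    # Turns osm2rdf geometry triples into "ohm<type>:<id>\t<WKT>" lines,
    # the TSV shape that spatialjoin reads from stdin.
    from __future__ import annotations
    import re

    PATTERN = re.compile(
        r'^osm2rdf(geom)?:(ohm_)?(node|rel|way)[a-z]*_([0-9]+)'
        r' geo:asWKT "([^"]+)".*')

    def spatialjoin_input(line: str) -> str | None:
        match = PATTERN.match(line)
        if match is None:
            return None  # sed -n prints nothing for non-matching lines
        _, _, osm_type, osm_id, wkt = match.groups()
        return f"ohm{osm_type}:{osm_id}\t{wkt}"

    # Hypothetical input line, shaped like osm2rdf output:
    line = 'osm2rdfgeom:ohm_node_42 geo:asWKT "POINT(7.8 48.0)"^^geo:wktLiteral .'
    print(spatialjoin_input(line))  # ohmnode:42	POINT(7.8 48.0)

The new GET_DATA_CMD also prepends set -o pipefail, so a failure anywhere in these long pipelines aborts the whole command instead of being masked by the last tee.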
qlever/Qleverfiles/Qleverfile.scientists CHANGED
@@ -1,39 +1,29 @@
- # Qleverfile for Scientists, use with https://github.com/ad-freiburg/qlever-control
+ # Qleverfile for "scientists", use with qlever script (pip install qlever)
  #
- # qlever get-data # download .zip file of size 79 MB, uncompressed to 318 MB
- # qlever index # takes ~20 seconds and ~1 GB RAM (on an AMD Ryzen 9 5900X)
- # qlever start # starts the server (instant)
- #
- # Also builds a text index for keyword search on the literals AND keyword search
- # in Wikipedia sentences linked to the RDF data; see TEXT_DESCRIPTION below.
+ # qlever get-data # get "scientists" dataset (370M triples, 2.2 M texts records)
+ # qlever index # build index, including text index (takes ~20 seconds)
+ # qlever start # start the server (instant)
+
+ [data]
+ NAME = scientists
+ GET_DATA_CMD = curl -LRC - -O https://github.com/ad-freiburg/qlever/raw/master/e2e/scientist-collection.zip && unzip -j scientist-collection.zip && rm -f scientist-collection.zip
+ DESCRIPTION = Test collection from https://github.com/ad-freiburg/qlever/tree/master/e2e (triples and text about scientists)
+ TEXT_DESCRIPTION = Text from all literals and Wikipedia articles on scientists (use ql:contains-entity and ql:contains-word)
 
- # Indexer settings
- DB = scientists
- RDF_FILES = "${DB}.nt"
- CAT_FILES = "cat ${RDF_FILES}"
- WITH_TEXT_INDEX = from_text_records_and_literals
- STXXL_MEMORY = 1G
- SETTINGS_JSON = '{ "ascii-prefixes-only": true, "num-triples-per-batch": 100000 }'
- GET_DATA_CMD = "wget https://github.com/ad-freiburg/qlever/raw/master/e2e/scientist-collection.zip && unzip -j scientist-collection.zip && rm -f scientist-collection.zip"
- INDEX_DESCRIPTION = "Scientist collection from QLever's end-to-end test, see https://github.com/ad-freiburg/qlever/tree/master/e2e"
- TEXT_DESCRIPTION = "Literals (use FILTER CONTAINS) and Wikipedia articles (use ql:contains-entity and ql:contains-word)"
+ [index]
+ INPUT_FILES = ${data:NAME}.nt
+ CAT_INPUT_FILES = cat ${INPUT_FILES}
+ SETTINGS_JSON = { "ascii-prefixes-only": true, "num-triples-per-batch": 100000 }
+ TEXT_INDEX = from_text_records_and_literals
 
- # Server settings
- HOSTNAME = $(hostname -f)
- SERVER_PORT = 7020
- ACCESS_TOKEN = ${DB}_%RANDOM%
- MEMORY_FOR_QUERIES = 5G
- CACHE_MAX_SIZE = 2G
- CACHE_MAX_SIZE_SINGLE_ENTRY = 1G
- CACHE_MAX_NUM_ENTRIES = 100
+ [server]
+ PORT = 7020
+ ACCESS_TOKEN = ${data:NAME}
+ MEMORY_FOR_QUERIES = 5G
 
- # QLever binaries
- QLEVER_BIN_DIR = %QLEVER_BIN_DIR%
- USE_DOCKER = true
- QLEVER_DOCKER_IMAGE = adfreiburg/qlever
- QLEVER_DOCKER_CONTAINER = qlever.scientists
+ [runtime]
+ SYSTEM = docker
+ IMAGE = docker.io/adfreiburg/qlever:latest
 
- # QLever UI
- QLEVERUI_PORT = 7000
- QLEVERUI_DIR = qlever-ui
- QLEVERUI_CONFIG = default
+ [ui]
+ UI_CONFIG = scientists
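The new GET_DATA_CMD downloads the e2e test collection and unzips it flat (unzip -j junks the archive's directory paths). A minimal Python sketch of the same steps, assuming only the URL from the Qleverfile:

    # Minimal sketch: mirror GET_DATA_CMD in Python. Downloads the e2e
    # zip into memory and extracts each member without its directory
    # path, like `unzip -j`.
    import io
    import urllib.request
    import zipfile
    from pathlib import Path

    URL = ("https://github.com/ad-freiburg/qlever/raw/master/e2e/"
           "scientist-collection.zip")

    with urllib.request.urlopen(URL) as response:
        archive = zipfile.ZipFile(io.BytesIO(response.read()))
    for member in archive.infolist():
        if not member.is_dir():
            # Keep only the base file name, dropping archive paths.
            Path(Path(member.filename).name).write_bytes(archive.read(member))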
qlever/commands/query.py ADDED
@@ -0,0 +1,73 @@
+ from __future__ import annotations
+
+ import shlex
+ import time
+ import traceback
+
+ from qlever.command import QleverCommand
+ from qlever.log import log
+ from qlever.util import run_command
+
+
+ class QueryCommand(QleverCommand):
+     """
+     Class for executing the `query` command.
+     """
+
+     def __init__(self):
+         pass
+
+     def description(self) -> str:
+         return ("Send a query to a SPARQL endpoint")
+
+     def should_have_qleverfile(self) -> bool:
+         return False
+
+     def relevant_qleverfile_arguments(self) -> dict[str: list[str]]:
+         return {"server": ["port"]}
+
+     def additional_arguments(self, subparser) -> None:
+         subparser.add_argument("--query", type=str,
+                                default="SELECT * WHERE { ?s ?p ?o } LIMIT 10",
+                                help="SPARQL query to send")
+         subparser.add_argument("--sparql-endpoint", type=str,
+                                help="URL of the SPARQL endpoint")
+         subparser.add_argument("--accept", type=str,
+                                choices=["text/tab-separated-values",
+                                         "text/csv",
+                                         "application/sparql-results+json",
+                                         "application/sparql-results+xml",
+                                         "application/qlever-results+json"],
+                                default="text/tab-separated-values",
+                                help="Accept header for the SPARQL query")
+         subparser.add_argument("--no-time", action="store_true",
+                                default=False,
+                                help="Do not print the (end-to-end) time taken")
+
+     def execute(self, args) -> bool:
+         # Show what the command will do.
+         sparql_endpoint = (args.sparql_endpoint if args.sparql_endpoint
+                            else f"localhost:{args.port}")
+         curl_cmd = (f"curl -s {sparql_endpoint}"
+                     f" -H \"Accept: {args.accept}\""
+                     f" --data-urlencode query={shlex.quote(args.query)}")
+         self.show(curl_cmd, only_show=args.show)
+         if args.show:
+             return False
+
+         # Launch query.
+         try:
+             start_time = time.time()
+             run_command(curl_cmd, show_output=True)
+             time_msecs = round(1000 * (time.time() - start_time))
+             if not args.no_time and args.log_level != "NO_LOG":
+                 log.info("")
+                 log.info(f"Query processing time (end-to-end):"
+                          f" {time_msecs:,d} ms")
+         except Exception as e:
+             if args.log_level == "DEBUG":
+                 traceback.print_exc()
+             log.error(e)
+             return False
+
+         return True
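For the default arguments, execute() assembles a curl call against the port from the Qleverfile's [server] section. A minimal sketch of that construction (the port value is illustrative, taken from the dblp Qleverfile above):

    # Minimal sketch: the curl command that execute() builds for the
    # default --query and --accept values.
    import shlex

    port = 7015  # illustrative; comes from [server] PORT in practice
    query = "SELECT * WHERE { ?s ?p ?o } LIMIT 10"
    accept = "text/tab-separated-values"
    curl_cmd = (f"curl -s localhost:{port}"
                f" -H \"Accept: {accept}\""
                f" --data-urlencode query={shlex.quote(query)}")
    print(curl_cmd)
    # curl -s localhost:7015 -H "Accept: text/tab-separated-values"
    #   --data-urlencode query='SELECT * WHERE { ?s ?p ?o } LIMIT 10'

shlex.quote() wraps the query in single quotes so shell metacharacters like ? and { pass through to curl unmodified.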
qlever/log.py CHANGED
@@ -33,7 +33,8 @@ log_levels = {
      "INFO": logging.INFO,
      "WARNING": logging.WARNING,
      "ERROR": logging.ERROR,
-     "CRITICAL": logging.CRITICAL
+     "CRITICAL": logging.CRITICAL,
+     "NO_LOG": logging.CRITICAL + 1
  }
 
 
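The new NO_LOG level maps to logging.CRITICAL + 1, which is higher than the level of any message, so setting it silences all output (this is what lets `qlever query` suppress its timing line). A minimal sketch of the mechanism with the standard logging module:

    # Minimal sketch: a logger level of logging.CRITICAL + 1 exceeds
    # every message's level, so setLevel() with it suppresses all output.
    import logging

    logging.basicConfig()
    logger = logging.getLogger("demo")

    logger.setLevel(logging.CRITICAL + 1)  # the "NO_LOG" idea
    logger.critical("not shown")           # CRITICAL (50) < 51, suppressed

    logger.setLevel(logging.INFO)
    logger.info("shown")                   # INFO messages pass again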
qlever-0.5.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: qlever
- Version: 0.5.0
+ Version: 0.5.2
  Summary: Script for using the QLever SPARQL engine.
  Author-email: Hannah Bast <bast@cs.uni-freiburg.de>
  License: Apache-2.0
qlever-0.5.2.dist-info/RECORD CHANGED
@@ -3,24 +3,24 @@ qlever/__main__.py,sha256=MqM37bEzQeJEGUXZvuLcilIvnObZiG2eTGIkfKGpdnw,62016
  qlever/command.py,sha256=yOr0Uc8D8-AM7EjwDsVzbc3KNYjPH-FVOZhIHkqO588,2749
  qlever/config.py,sha256=qYPy-MQ7BwGrvKSazQWhs0lnlOFqm-d47mpZhc3fptc,10254
  qlever/containerize.py,sha256=p8g3O3G8a_0XLzSTzl_e5t9dqjbCQ-ippoA8vI2Z9pI,4193
- qlever/log.py,sha256=and5prQcLW_5nM8AAZNeNbVqJxLtJmX3EjHFhtHCR28,1279
+ qlever/log.py,sha256=2O_RvFymnu_dB10ErBTAOsI8bgjORfdD0tE3USH-siM,1315
  qlever/qlever_main.py,sha256=tA_xqOs_FjvqlDIvKTprwuysfTwzsUjE7at26gRhCVA,2336
  qlever/qlever_old.py,sha256=6sHosOfJzkURpdK4wXLdGl4SUtPnlsNEUwAqUeJiRYA,62026
  qlever/qleverfile.py,sha256=6Ll81xkzel_s2Ju9ZfBXUGlRfikaAzZM6Do-dTrdo3k,12934
  qlever/util.py,sha256=eepj0SY9JJOUQq5kvtoPnWfoLLV9fbw_sTEWKHet66E,7147
- qlever/Qleverfiles/Qleverfile.dblp,sha256=SFjBD20aOSWod4mEQnxHSDWdInoE_EFp2nyMw7ev7ZA,1167
+ qlever/Qleverfiles/Qleverfile.dblp,sha256=DjeyY0MGNrqEY-_8nRkjSsofGBBSz_wIgQ048KoRQBM,1002
  qlever/Qleverfiles/Qleverfile.dblp-plus,sha256=Dwd9pK1vPcelKfw6sA-IuyhbZ6yIxOh6_84JgPYnB9Q,1332
  qlever/Qleverfiles/Qleverfile.default,sha256=mljl6I1RCkpIWOqMQwjzPZIsarYQx1R0mIlc583KuqU,1869
- qlever/Qleverfiles/Qleverfile.dnb,sha256=yw4MmLsDPP3P5JWPgJwgPJh66TqwkyUXbQR5lSf5oHc,1511
+ qlever/Qleverfiles/Qleverfile.dnb,sha256=GgnsbtRUl__yMCqUX2EN5x1oDWpW93bUalMfUxN8534,1751
  qlever/Qleverfiles/Qleverfile.fbeasy,sha256=jeztW4gFpWL_w1nCH5qGHeZyZv2lz_kG6f1G3r3DkJ4,974
  qlever/Qleverfiles/Qleverfile.freebase,sha256=k6PqYrtHTBr0EydObm1Hg9QWyAAM9fXkdcjhReDg0fM,1035
  qlever/Qleverfiles/Qleverfile.imdb,sha256=uL5XlPwX01AmH-j6_Bc-PRm2fuPxGSIu8NaDflY525U,1623
- qlever/Qleverfiles/Qleverfile.ohm-planet,sha256=PVf9xf11KoPfKBAwYVrOAjibUZ2EG5giCSmllFqcmmQ,2493
+ qlever/Qleverfiles/Qleverfile.ohm-planet,sha256=Y_yUxdpWpUOSDo_zmVKj3caa8X-Wv-1KmacBMks_4QA,2650
  qlever/Qleverfiles/Qleverfile.olympics,sha256=5w9BOFwEBhdSzPz-0LRxwhv-7Gj6xbF539HOXr3cqD0,1088
  qlever/Qleverfiles/Qleverfile.osm-country,sha256=UnlkckSXJDrknZORlU-Hdj_J82U4kStl1aRctCc5n6M,1953
  qlever/Qleverfiles/Qleverfile.osm-planet,sha256=2RilNix0fplN3GsNNyOu3GzmUss1Pq7586WKOFAQnSs,1400
  qlever/Qleverfiles/Qleverfile.pubchem,sha256=bOhiJKUxzDiAm1UyXFPDQLYTqGc9jM8240fhobYLij0,3898
- qlever/Qleverfiles/Qleverfile.scientists,sha256=oFhzURcRFciA27GZ-ux_hsDe0esBLobWHC6h_Vf2xy8,1735
+ qlever/Qleverfiles/Qleverfile.scientists,sha256=9eZ2c6P9a3E3VHa3RR7LdOQbF4k3oyyrn56Z3u4LZYs,1164
  qlever/Qleverfiles/Qleverfile.uniprot,sha256=FS8QLHvujbjUYyU2Ma0PRgfCWlulviaGLc_1csxpuic,2201
  qlever/Qleverfiles/Qleverfile.vvz,sha256=ftdMj5dCC9jAlFtNt2WR7kP30w0itT_iYtj5HoUVyWU,931
  qlever/Qleverfiles/Qleverfile.wikidata,sha256=fhWSChZTH3c2y14kgP1P5Duq1SsewTOK3wETf6RRmI8,1172
@@ -35,15 +35,16 @@ qlever/commands/get_data.py,sha256=f9kjZI3TKad6JHSuXWNkeoajmW8h0Sx8ShvjauDCtNo,1
  qlever/commands/index.py,sha256=lJhDnweknFZQm1czqPzNyz33EvbjIvOrS4j0wDaJ98o,5663
  qlever/commands/index_stats.py,sha256=_BiUNBhmbYd9RPxrlm4HF0oENO6JmqnRiAkwkyOdN4U,11722
  qlever/commands/log.py,sha256=8Krt3MsTUDapYqVw1zUu5X15SF8mV97Uj0qKOWK8jXk,1861
+ qlever/commands/query.py,sha256=_IDH-M8gKL_f1i5wzu0X452pZSUD0_qXl6bPXC85wX0,2750
  qlever/commands/setup_config.py,sha256=6T0rXrIdejKMKhDbOMEMBKyMF_hAqO5nJaRFb57QPQU,2964
  qlever/commands/start.py,sha256=2rOtk3NmhEs28D5csL_a1BdjSWU9VkcH6AqYT0vdww0,9285
  qlever/commands/status.py,sha256=5S6EdapZEwFKV9cQZtNYcZhMbAXAY-FP6ggjIhfX8ek,1631
  qlever/commands/stop.py,sha256=TZs4bxKHvujlZAU8BZmFjA5eXSZNAa6EeNzvPpEZsuI,4139
  qlever/commands/ui.py,sha256=rV8u017WLbfz0zVT_c9GC4d9v1WWwrTM3kfGONbeCvQ,2499
  qlever/commands/warmup.py,sha256=WOZSxeV8U_F6pEEnAb6YybXLQMxZFTRJXs4BPHUhsmc,1030
- qlever-0.5.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- qlever-0.5.0.dist-info/METADATA,sha256=GtQdaBBOd5WYb2_UrqpTawHjJsKkEbEI6qyyaVi4rwY,4146
- qlever-0.5.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- qlever-0.5.0.dist-info/entry_points.txt,sha256=U_gbYYi0wwdsn884eb0XoOXfvhACOsxhlO330dZ9bi0,87
- qlever-0.5.0.dist-info/top_level.txt,sha256=kd3zsYqiFd0--Czh5XTVkfEq6XR-XgRFW35X0v0GT-c,7
- qlever-0.5.0.dist-info/RECORD,,
+ qlever-0.5.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ qlever-0.5.2.dist-info/METADATA,sha256=MHlNH1mplzq3hJ0ETmjUqM4Zw6zLqvends9IT60M4Io,4146
+ qlever-0.5.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ qlever-0.5.2.dist-info/entry_points.txt,sha256=U_gbYYi0wwdsn884eb0XoOXfvhACOsxhlO330dZ9bi0,87
+ qlever-0.5.2.dist-info/top_level.txt,sha256=kd3zsYqiFd0--Czh5XTVkfEq6XR-XgRFW35X0v0GT-c,7
+ qlever-0.5.2.dist-info/RECORD,,
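The sha256= values in RECORD are urlsafe-base64-encoded digests with the trailing "=" padding stripped, followed by the file size in bytes (the standard wheel RECORD format). A minimal sketch of how to recompute an entry:

    # Minimal sketch: recompute a RECORD-style entry for a file.
    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: str) -> str:
        data = Path(path).read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

    # E.g. record_entry("qlever/log.py") should reproduce the line
    # qlever/log.py,sha256=2O_RvFymnu_dB10ErBTAOsI8bgjORfdD0tE3USH-siM,1315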