openedx-learning 0.27.1__py2.py3-none-any.whl → 0.28.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openedx_learning/__init__.py +1 -1
- openedx_learning/apps/authoring/backup_restore/toml.py +5 -4
- openedx_learning/apps/authoring/backup_restore/zipper.py +201 -12
- openedx_learning/apps/authoring/publishing/api.py +29 -8
- {openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/METADATA +5 -5
- {openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/RECORD +10 -10
- openedx_tagging/core/tagging/models/utils.py +37 -9
- {openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/WHEEL +0 -0
- {openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/licenses/LICENSE.txt +0 -0
- {openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/top_level.txt +0 -0
openedx_learning/__init__.py
CHANGED

openedx_learning/apps/authoring/backup_restore/toml.py
CHANGED

@@ -6,6 +6,7 @@ from datetime import datetime
 
 import tomlkit
 
+from openedx_learning.apps.authoring.publishing import api as publishing_api
 from openedx_learning.apps.authoring.publishing.models import PublishableEntity, PublishableEntityVersion
 from openedx_learning.apps.authoring.publishing.models.learning_package import LearningPackage
 

@@ -27,8 +28,8 @@ def toml_publishable_entity(entity: PublishableEntity) -> str:
 def toml_publishable_entity(entity: PublishableEntity) -> str:
     """Create a TOML representation of a publishable entity."""
 
-    current_draft_version = …
-    current_published_version = …
+    current_draft_version = publishing_api.get_draft_version(entity)
+    current_published_version = publishing_api.get_published_version(entity)
 
     doc = tomlkit.document()
     entity_table = tomlkit.table()

@@ -37,12 +38,12 @@ def toml_publishable_entity(entity: PublishableEntity) -> str:
 
     if current_draft_version:
         draft_table = tomlkit.table()
-        draft_table.add("version_num", current_draft_version.…
+        draft_table.add("version_num", current_draft_version.version_num)
        entity_table.add("draft", draft_table)
 
     published_table = tomlkit.table()
     if current_published_version:
-        published_table.add("version_num", current_published_version.…
+        published_table.add("version_num", current_published_version.version_num)
     else:
         published_table.add(tomlkit.comment("unpublished: no published_version_num"))
     entity_table.add("published", published_table)
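The entity TOML now records the draft and published version numbers resolved through publishing_api. A minimal standalone sketch of the resulting document shape, using tomlkit directly; the top-level table name and the version numbers are illustrative, not taken from the library:

import tomlkit

doc = tomlkit.document()
entity_table = tomlkit.table()

draft_table = tomlkit.table()
draft_table.add("version_num", 3)        # would come from publishing_api.get_draft_version(entity)
entity_table.add("draft", draft_table)

published_table = tomlkit.table()
published_table.add("version_num", 2)    # would come from publishing_api.get_published_version(entity)
entity_table.add("published", published_table)

doc.add("entity", entity_table)          # "entity" is an assumed table name for this sketch
print(tomlkit.dumps(doc))
# Produces roughly:
#   [entity.draft]
#   version_num = 3
#
#   [entity.published]
#   version_num = 2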
openedx_learning/apps/authoring/backup_restore/zipper.py
CHANGED

@@ -2,16 +2,54 @@
 This module provides functionality to create a zip file containing the learning package data,
 including a TOML representation of the learning package and its entities.
 """
+import hashlib
 import zipfile
 from pathlib import Path
+from typing import List, Optional
 
+from django.db.models import Prefetch, QuerySet
+from django.utils.text import slugify
+
+from openedx_learning.api.authoring_models import (
+    ComponentVersion,
+    ComponentVersionContent,
+    Content,
+    LearningPackage,
+    PublishableEntity,
+    PublishableEntityVersion,
+)
 from openedx_learning.apps.authoring.backup_restore.toml import toml_learning_package, toml_publishable_entity
 from openedx_learning.apps.authoring.publishing import api as publishing_api
-from openedx_learning.apps.authoring.publishing.models.learning_package import LearningPackage
 
 TOML_PACKAGE_NAME = "package.toml"
 
 
+def slugify_hashed_filename(identifier: str) -> str:
+    """
+    Generate a filesystem-safe filename from an identifier.
+
+    Why:
+        Identifiers may contain characters that are invalid or ambiguous
+        in filesystems (e.g., slashes, colons, case differences).
+        Additionally, two different identifiers might normalize to the same
+        slug after cleaning. To avoid collisions and ensure uniqueness,
+        we append a short blake2b hash.
+
+    What:
+        - Slugify the identifier (preserves most characters, only strips
+          filesystem-invalid ones).
+        - Append a short hash for uniqueness.
+        - Result: human-readable but still unique and filesystem-safe filename.
+    """
+    slug = slugify(identifier, allow_unicode=True)
+    # Short digest ensures uniqueness without overly long filenames
+    short_hash = hashlib.blake2b(
+        identifier.encode("utf-8"),
+        digest_size=3,
+    ).hexdigest()
+    return f"{slug}_{short_hash}"
+
+
 class LearningPackageZipper:
     """
     A class to handle the zipping of learning content for backup and restore.

@@ -19,6 +57,66 @@ class LearningPackageZipper:
 
     def __init__(self, learning_package: LearningPackage):
         self.learning_package = learning_package
+        self.folders_already_created: set[Path] = set()
+
+    def create_folder(self, folder_path: Path, zip_file: zipfile.ZipFile) -> None:
+        """
+        Create a folder for the zip file structure.
+        Skips creating the folder if it already exists based on the folder path.
+        Args:
+            folder_path (Path): The path of the folder to create.
+        """
+        if folder_path not in self.folders_already_created:
+            zip_info = zipfile.ZipInfo(str(folder_path) + "/")
+            zip_file.writestr(zip_info, "")  # Add explicit empty directory entry
+            self.folders_already_created.add(folder_path)
+
+    def get_publishable_entities(self) -> QuerySet[PublishableEntity]:
+        """
+        Retrieve the publishable entities associated with the learning package.
+        Prefetches related data for efficiency.
+        """
+        lp_id = self.learning_package.pk
+        publishable_entities: QuerySet[PublishableEntity] = publishing_api.get_publishable_entities(lp_id)
+        return (
+            publishable_entities
+            .select_related(
+                "container",
+                "component__component_type",
+                "draft__version__componentversion",
+                "published__version__componentversion",
+            )
+            .prefetch_related(
+                # We should re-evaluate the prefetching strategy here,
+                # as the current approach may cause performance issues—
+                # especially with large libraries (up to 100K items),
+                # which is too large for this type of prefetch.
+                Prefetch(
+                    "draft__version__componentversion__componentversioncontent_set",
+                    queryset=ComponentVersionContent.objects.select_related("content"),
+                    to_attr="prefetched_contents",
+                ),
+                Prefetch(
+                    "published__version__componentversion__componentversioncontent_set",
+                    queryset=ComponentVersionContent.objects.select_related("content"),
+                    to_attr="prefetched_contents",
+                ),
+            )
+        )
+
+    def get_versions_to_write(self, entity: PublishableEntity):
+        """
+        Get the versions of a publishable entity that should be written to the zip file.
+        It retrieves both draft and published versions.
+        """
+        draft_version: Optional[PublishableEntityVersion] = publishing_api.get_draft_version(entity)
+        published_version: Optional[PublishableEntityVersion] = publishing_api.get_published_version(entity)
+
+        versions_to_write = [draft_version] if draft_version else []
+
+        if published_version and published_version != draft_version:
+            versions_to_write.append(published_version)
+        return versions_to_write
 
     def create_zip(self, path: str) -> None:
         """

@@ -28,26 +126,117 @@ class LearningPackageZipper:
         Raises:
             Exception: If the learning package cannot be found or if the zip creation fails.
         """
-        package_toml_content: str = toml_learning_package(self.learning_package)
 
         with zipfile.ZipFile(path, "w", compression=zipfile.ZIP_DEFLATED) as zipf:
-            # Add the package.toml
+            # Add the package.toml file
+            package_toml_content: str = toml_learning_package(self.learning_package)
             zipf.writestr(TOML_PACKAGE_NAME, package_toml_content)
 
             # Add the entities directory
             entities_folder = Path("entities")
-            …
-            zipf.writestr(zip_info, "")  # Add explicit empty directory entry
+            self.create_folder(entities_folder, zipf)
 
             # Add the collections directory
             collections_folder = Path("collections")
-            …
-            …
+            self.create_folder(collections_folder, zipf)
+
+            # ------ ENTITIES SERIALIZATION -------------
+
+            # get the publishable entities
+            publishable_entities: QuerySet[PublishableEntity] = self.get_publishable_entities()
+
+            for entity in publishable_entities:
+                # entity: PublishableEntity = entity  # Type hint for clarity
 
-            # Add each entity's TOML file
-            for entity in publishing_api.get_entities(self.learning_package.pk):
                 # Create a TOML representation of the entity
                 entity_toml_content: str = toml_publishable_entity(entity)
-                …
-                …
-                …
+
+                if hasattr(entity, 'container'):
+                    entity_slugify_hash = slugify_hashed_filename(entity.key)
+                    entity_toml_filename = f"{entity_slugify_hash}.toml"
+                    entity_toml_path = entities_folder / entity_toml_filename
+                    zipf.writestr(str(entity_toml_path), entity_toml_content)
+
+                if hasattr(entity, 'component'):
+                    # Create the component folder structure for the entity. The structure is as follows:
+                    # entities/
+                    #     xblock.v1/ (component namespace)
+                    #         html/ (component type)
+                    #             my_component.toml (entity TOML file)
+                    #             my_component/ (component id)
+                    #                 component_versions/
+                    #                     v1/
+                    #                         static/
 
+                    # Generate the slugified hash for the component local key
+                    # Example: if the local key is "my_component", the slugified hash might be "my_component_123456"
+                    # It's a combination of the local key and a hash and should be unique
+                    entity_slugify_hash = slugify_hashed_filename(entity.component.local_key)
+
+                    # Create the component namespace folder
+                    # Example of component namespace is: "entities/xblock.v1/"
+                    component_namespace_folder = entities_folder / entity.component.component_type.namespace
+                    self.create_folder(component_namespace_folder, zipf)
+
+                    # Create the component type folder
+                    # Example of component type is: "entities/xblock.v1/html/"
+                    component_type_folder = component_namespace_folder / entity.component.component_type.name
+                    self.create_folder(component_type_folder, zipf)
+
+                    # Create the component id folder
+                    # Example of component id is: "entities/xblock.v1/html/my_component_123456/"
+                    component_id_folder = component_type_folder / entity_slugify_hash
+                    self.create_folder(component_id_folder, zipf)
+
+                    # Add the entity TOML file inside the component type folder as well
+                    # Example: "entities/xblock.v1/html/my_component_123456.toml"
+                    component_entity_toml_path = component_type_folder / f"{entity_slugify_hash}.toml"
+                    zipf.writestr(str(component_entity_toml_path), entity_toml_content)
+
+                    # Add component version folder into the component id folder
+                    # Example: "entities/xblock.v1/html/my_component_123456/component_versions/"
+                    component_version_folder = component_id_folder / "component_versions"
+                    self.create_folder(component_version_folder, zipf)
+
+                    # ------ COMPONENT VERSIONING -------------
+                    # Focusing on draft and published versions
+
+                    # Get the draft and published versions
+                    versions_to_write: List[PublishableEntityVersion] = self.get_versions_to_write(entity)
+
+                    for version in versions_to_write:
+                        # Create a folder for the version
+                        version_number = f"v{version.version_num}"
+                        version_folder = component_version_folder / version_number
+                        self.create_folder(version_folder, zipf)
+
+                        # Add static folder for the version
+                        static_folder = version_folder / "static"
+                        self.create_folder(static_folder, zipf)
+
+                        # ------ COMPONENT STATIC CONTENT -------------
+                        component_version: ComponentVersion = version.componentversion
+
+                        # Get content data associated with this version
+                        contents: QuerySet[
+                            ComponentVersionContent
+                        ] = component_version.prefetched_contents  # type: ignore[attr-defined]
+
+                        for component_version_content in contents:
+                            content: Content = component_version_content.content
+
+                            # Important: The component_version_content.key contains implicitly
+                            # the file name and the file extension
+                            file_path = version_folder / component_version_content.key
+
+                            if content.has_file and content.path:
+                                # If has_file, we pull it from the file system
+                                with content.read_file() as f:
+                                    file_data = f.read()
+                            elif not content.has_file and content.text:
+                                # Otherwise, we use the text content as the file data
+                                file_data = content.text
+                            else:
+                                # If no file and no text, we skip this content
+                                continue
+                            zipf.writestr(str(file_path), file_data)
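As a usage sketch: assuming a configured Django project with openedx-learning installed and an existing LearningPackage instance at hand, the new helpers could be exercised like this (variable names are illustrative):

from openedx_learning.apps.authoring.backup_restore.zipper import (
    LearningPackageZipper,
    slugify_hashed_filename,
)

# slugify plus a 3-byte blake2b digest keeps keys readable but collision-resistant
print(slugify_hashed_filename("My Component/1"))  # e.g. "my-component1_" followed by 6 hex chars

# learning_package: an existing LearningPackage instance
# Dumps package.toml, every entity's TOML, and each draft/published component
# version with its static content into one archive:
LearningPackageZipper(learning_package).create_zip("backup.zip")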
openedx_learning/apps/authoring/publishing/api.py
CHANGED

@@ -10,7 +10,7 @@ from contextlib import nullcontext
 from dataclasses import dataclass
 from datetime import datetime, timezone
 from enum import Enum
-from typing import ContextManager, TypeVar
+from typing import ContextManager, Optional, TypeVar
 
 from django.core.exceptions import ObjectDoesNotExist, ValidationError
 from django.db.models import F, Q, QuerySet

@@ -58,9 +58,9 @@ __all__ = [
     "create_publishable_entity_version",
     "get_publishable_entity",
     "get_publishable_entity_by_key",
+    "get_publishable_entities",
     "get_last_publish",
     "get_all_drafts",
-    "get_entities",
     "get_entities_with_unpublished_changes",
     "get_entities_with_unpublished_deletes",
     "publish_all_drafts",

@@ -262,11 +262,18 @@ def get_all_drafts(learning_package_id: int, /) -> QuerySet[Draft]:
     )
 
 
-def …
+def get_publishable_entities(learning_package_id: int, /) -> QuerySet[PublishableEntity]:
     """
     Get all entities in a learning package.
     """
-    return …
+    return (
+        PublishableEntity.objects
+        .filter(learning_package_id=learning_package_id)
+        .select_related(
+            "draft__version",
+            "published__version",
+        )
+    )
 
 
 def get_entities_with_unpublished_changes(

@@ -425,15 +432,22 @@ def publish_from_drafts(
     return publish_log
 
 
-def get_draft_version(…
+def get_draft_version(publishable_entity_or_id: PublishableEntity | int, /) -> PublishableEntityVersion | None:
     """
     Return current draft PublishableEntityVersion for this PublishableEntity.
 
     This function will return None if there is no current draft.
     """
+    if isinstance(publishable_entity_or_id, PublishableEntity):
+        # Fetches the draft version for a given PublishableEntity.
+        # Gracefully handles cases where no draft is present.
+        draft: Optional[Draft] = getattr(publishable_entity_or_id, "draft", None)
+        if draft is None:
+            return None
+        return draft.version
     try:
         draft = Draft.objects.select_related("version").get(
-            entity_id=…
+            entity_id=publishable_entity_or_id
         )
     except Draft.DoesNotExist:
         # No draft was ever created.

@@ -445,15 +459,22 @@ def get_draft_version(publishable_entity_id: int, /) -> PublishableEntityVersion
     return draft.version
 
 
-def get_published_version(…
+def get_published_version(publishable_entity_or_id: PublishableEntity | int, /) -> PublishableEntityVersion | None:
     """
     Return current published PublishableEntityVersion for this PublishableEntity.
 
     This function will return None if there is no current published version.
     """
+    if isinstance(publishable_entity_or_id, PublishableEntity):
+        # Fetches the published version for a given PublishableEntity.
+        # Gracefully handles cases where no published version is present.
+        published: Optional[Published] = getattr(publishable_entity_or_id, "published", None)
+        if published is None:
+            return None
+        return published.version
     try:
         published = Published.objects.select_related("version").get(
-            entity_id=…
+            entity_id=publishable_entity_or_id
         )
     except Published.DoesNotExist:
         return None
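A small sketch of the widened signatures above: both lookups keep accepting an integer id and now also accept a PublishableEntity instance, which avoids extra queries when "draft__version" and "published__version" are already joined (as the new get_publishable_entities() does). The ids below are illustrative:

from openedx_learning.apps.authoring.publishing import api as publishing_api

# By id (positional-only, as before):
draft = publishing_api.get_draft_version(42)

# By instance (new path): uses the already-loaded draft/published relations.
for entity in publishing_api.get_publishable_entities(1):
    published = publishing_api.get_published_version(entity)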
{openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openedx-learning
-Version: 0.27.1
+Version: 0.28.0
 Summary: Open edX Learning Core and Tagging.
 Home-page: https://github.com/openedx/openedx-learning
 Author: David Ormsbee

@@ -19,13 +19,13 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.11
 License-File: LICENSE.txt
-Requires-Dist: djangorestframework<4.0
-Requires-Dist: tomlkit
 Requires-Dist: celery
+Requires-Dist: rules<4.0
 Requires-Dist: Django
-Requires-Dist: attrs
 Requires-Dist: edx-drf-extensions
-Requires-Dist: …
+Requires-Dist: tomlkit
+Requires-Dist: attrs
+Requires-Dist: djangorestframework<4.0
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
{openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-openedx_learning/__init__.py,sha256=…
+openedx_learning/__init__.py,sha256=uNMwU0K5IBvKkVUxhRpjU7JmSuDVu7oR_QPr65ElCPE,69
 openedx_learning/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/api/authoring.py,sha256=EDWTY_JDKtjD9nFrrijzWuVccs3LZeDLEdzTUNanR4I,1111

@@ -10,8 +10,8 @@ openedx_learning/apps/authoring/backup_restore/admin.py,sha256=OnEixkOuysPRr-F6C
 openedx_learning/apps/authoring/backup_restore/api.py,sha256=zEns3crvfFEFFh7MmwzSqW0WuGmZaSgdmujzl0PnfvU,508
 openedx_learning/apps/authoring/backup_restore/apps.py,sha256=UnExBA7jhd3qI30_87JMvzVhS_k82t89qDVKSMpvg_A,340
 openedx_learning/apps/authoring/backup_restore/models.py,sha256=jlr0ppxW0IOW3HPHoJNChHvDrYVnKMb5_3uC2itxqQk,45
-openedx_learning/apps/authoring/backup_restore/toml.py,sha256=…
-openedx_learning/apps/authoring/backup_restore/zipper.py,sha256=…
+openedx_learning/apps/authoring/backup_restore/toml.py,sha256=8D103aAfCtzVPV4U-yuB-HsXvNCO3cs3xhvMJTI4IXo,2955
+openedx_learning/apps/authoring/backup_restore/zipper.py,sha256=_SWgnfLHziFLWF-iXf_38L8v6C-yO8PjyZHxDCtaJ0k,11494
 openedx_learning/apps/authoring/backup_restore/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/apps/authoring/backup_restore/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/apps/authoring/backup_restore/management/commands/lp_dump.py,sha256=TkbyBf9Jsa7yoXiGEduO0ZqKTYO7vWGHbqr5NbEclRs,1696

@@ -48,7 +48,7 @@ openedx_learning/apps/authoring/contents/migrations/0001_initial.py,sha256=FtOTm
 openedx_learning/apps/authoring/contents/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/apps/authoring/publishing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_learning/apps/authoring/publishing/admin.py,sha256=nvAAl3Xswqqq3WyaI1NT7pLCcu1o-ynciJZOlc-9L24,16244
-openedx_learning/apps/authoring/publishing/api.py,sha256=…
+openedx_learning/apps/authoring/publishing/api.py,sha256=yNHqupNDY4FPNmQYnwMr1Sey_t85CjrpYpNDFh9nfF8,58162
 openedx_learning/apps/authoring/publishing/apps.py,sha256=PXYIx-TwN7a8dDudodX80Z7hNV9bWzrMZnpDET8lCGE,758
 openedx_learning/apps/authoring/publishing/contextmanagers.py,sha256=AH5zhr0Tz_gUG9--dfr_oZAu8DMy94n6mnOJuPbWkeU,6723
 openedx_learning/apps/authoring/publishing/migrations/0001_initial.py,sha256=wvekNV19YRSdxRmQaFnLSn_nCsQlHIucPDVMmgKf_OE,9272

@@ -101,7 +101,7 @@ openedx_learning/lib/fields.py,sha256=eiGoXMPhRuq25EH2qf6BAODshAQE3DBVdIYAMIUAXW
 openedx_learning/lib/managers.py,sha256=-Q3gxalSqyPZ9Im4DTROW5tF8wVTZLlmfTe62_xmowY,1643
 openedx_learning/lib/test_utils.py,sha256=g3KLuepIZbaDBCsaj9711YuqyUx7LD4gXDcfNC-mWdc,527
 openedx_learning/lib/validators.py,sha256=iqEdEAvFV2tC7Ecssx69kjecpdU8nE87AlDJYrqrsnc,404
-openedx_learning-0.27.1.dist-info/…
+openedx_learning-0.28.0.dist-info/licenses/LICENSE.txt,sha256=QTW2QN7q3XszgUAXm9Dzgtu5LXYKbR1SGnqMa7ufEuY,35139
 openedx_tagging/__init__.py,sha256=V9N8M7f9LYlAbA_DdPUsHzTnWjYRXKGa5qHw9P1JnNI,30
 openedx_tagging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_tagging/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -146,7 +146,7 @@ openedx_tagging/core/tagging/models/__init__.py,sha256=yYdOnthuc7EUdfEULtZgqRwn5
 openedx_tagging/core/tagging/models/base.py,sha256=ju4mvgRS_I2AgPsRf4sMFy6qle2i0aA0MbyBYZXf32g,39685
 openedx_tagging/core/tagging/models/import_export.py,sha256=Aj0pleh0nh2LNS6zmdB1P4bpdgUMmvmobTkqBerORAI,4570
 openedx_tagging/core/tagging/models/system_defined.py,sha256=_6LfvUZGEltvQMtm2OXy6TOLh3C8GnVTqtZDSAZW6K4,9062
-openedx_tagging/core/tagging/models/utils.py,sha256=…
+openedx_tagging/core/tagging/models/utils.py,sha256=x2dg3IYE3RVOZtInkwKtznHcByDmc2R-sBMVWksHnjs,2917
 openedx_tagging/core/tagging/rest_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openedx_tagging/core/tagging/rest_api/paginators.py,sha256=BUIAg3taihHx7uAjpTZAGK1xSZzZY9G0aib4OKv5c0k,2651
 openedx_tagging/core/tagging/rest_api/urls.py,sha256=egXaRQv1EAgF04ThgVZBQuvLK1LimuyUKKBD2Hbqb10,148

@@ -157,7 +157,7 @@ openedx_tagging/core/tagging/rest_api/v1/serializers.py,sha256=0HQD_Jrf6-YpocYfz
 openedx_tagging/core/tagging/rest_api/v1/urls.py,sha256=dNUKCtUCx_YzrwlbEbpDfjGVQbb2QdJ1VuJCkladj6E,752
 openedx_tagging/core/tagging/rest_api/v1/views.py,sha256=Hf92cy-tE767DE9FgsZcPKiCYrf5ihfETz8qGKBnuiU,36278
 openedx_tagging/core/tagging/rest_api/v1/views_import.py,sha256=kbHUPe5A6WaaJ3J1lFIcYCt876ecLNQfd19m7YYub6c,1470
-openedx_learning-0.27.1.dist-info/…
-openedx_learning-0.27.1.dist-info/…
-openedx_learning-0.27.1.dist-info/…
-openedx_learning-0.27.1.dist-info/…
+openedx_learning-0.28.0.dist-info/METADATA,sha256=2B4GnwRZKKZfm9RmEkeJITaAaGGHi4lfvnhPkW8a5Jk,9055
+openedx_learning-0.28.0.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+openedx_learning-0.28.0.dist-info/top_level.txt,sha256=IYFbr5mgiEHd-LOtZmXj3q3a0bkGK1M9LY7GXgnfi4M,33
+openedx_learning-0.28.0.dist-info/RECORD,,
openedx_tagging/core/tagging/models/utils.py
CHANGED

@@ -1,8 +1,9 @@
 """
 Utilities for tagging and taxonomy models
 """
-from django.db …
-from django.db.models …
+from django.db import connection as db_connection
+from django.db.models import Aggregate, CharField, TextField
+from django.db.models.expressions import Combinable, Func
 
 RESERVED_TAG_CHARS = [
     '\t',  # Used in the database to separate tag levels in the "lineage" field

@@ -34,21 +35,48 @@ class ConcatNull(Func):  # pylint: disable=abstract-method
     )
 
 
-class StringAgg(Aggregate):
+class StringAgg(Aggregate, Combinable):
     """
     Aggregate function that collects the values of some column across all rows,
-    and creates a string by concatenating those values, with …
+    and creates a string by concatenating those values, with a specified separator.
 
-    This …
-    but this version works with MySQL and SQLite.
+    This version supports PostgreSQL (STRING_AGG), MySQL (GROUP_CONCAT), and SQLite.
     """
+    # Default function is for MySQL (GROUP_CONCAT)
     function = 'GROUP_CONCAT'
     template = '%(function)s(%(distinct)s%(expressions)s)'
 
-    def __init__(self, expression, distinct=False, **extra):
+    def __init__(self, expression, distinct=False, delimiter=',', **extra):
+        self.delimiter = delimiter
+        # Handle the distinct option and output type
+        distinct_str = 'DISTINCT ' if distinct else ''
+
+        extra.update({
+            'distinct': distinct_str,
+            'output_field': CharField(),
+        })
+
+        # Check the database backend (PostgreSQL, MySQL, or SQLite)
+        if 'postgresql' in db_connection.vendor.lower():
+            self.function = 'STRING_AGG'
+            self.template = '%(function)s(%(distinct)s%(expressions)s, %(delimiter)s)'
+            extra.update({
+                "delimiter": self.delimiter,
+                "output_field": TextField(),
+            })
+
+        # Initialize the parent class with the necessary parameters
         super().__init__(
             expression,
-            distinct='DISTINCT ' if distinct else '',
-            output_field=CharField(),
             **extra,
         )
+
+    # Implementing abstract methods from Combinable
+    def __rand__(self, other):
+        return self._combine(other, 'AND', False)
+
+    def __ror__(self, other):
+        return self._combine(other, 'OR', False)
+
+    def __rxor__(self, other):
+        return self._combine(other, 'XOR', False)
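A usage sketch for the backend-aware aggregate; the model and field names below are hypothetical, and only StringAgg itself comes from the file above. Inside a configured Django project it is used like any other aggregate:

from openedx_tagging.core.tagging.models.utils import StringAgg

# Delegates to GROUP_CONCAT on MySQL/SQLite and to STRING_AGG on PostgreSQL
# (the backend is picked once at __init__ time from the default connection).
# Hypothetical query:
# tags_per_object = (
#     ObjectTag.objects
#     .values("object_id")
#     .annotate(joined_tags=StringAgg("value", distinct=True))
# )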
{openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/WHEEL
RENAMED
File without changes

{openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/licenses/LICENSE.txt
RENAMED
File without changes

{openedx_learning-0.27.1.dist-info → openedx_learning-0.28.0.dist-info}/top_level.txt
RENAMED
File without changes