aepp 0.4.1.post2.tar.gz → 0.4.2.post1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {aepp-0.4.1.post2/aepp.egg-info → aepp-0.4.2.post1}/PKG-INFO +1 -1
  2. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/__init__.py +53 -5
  3. aepp-0.4.2.post1/aepp/__version__.py +1 -0
  4. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/queryservice.py +21 -13
  5. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/synchronizer.py +274 -27
  6. {aepp-0.4.1.post2 → aepp-0.4.2.post1/aepp.egg-info}/PKG-INFO +1 -1
  7. aepp-0.4.1.post2/aepp/__version__.py +0 -1
  8. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/LICENSE +0 -0
  9. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/MANIFEST.in +0 -0
  10. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/README.md +0 -0
  11. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/accesscontrol.py +0 -0
  12. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/catalog.py +0 -0
  13. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/classmanager.py +0 -0
  14. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/config.py +0 -0
  15. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/configs.py +0 -0
  16. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/connector.py +0 -0
  17. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/customerprofile.py +0 -0
  18. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/dataaccess.py +0 -0
  19. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/dataprep.py +0 -0
  20. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/datasets.py +0 -0
  21. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/datatypemanager.py +0 -0
  22. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/deletion.py +0 -0
  23. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/destination.py +0 -0
  24. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/destinationinstanceservice.py +0 -0
  25. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/edge.py +0 -0
  26. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/exportDatasetToDataLandingZone.py +0 -0
  27. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/fieldgroupmanager.py +0 -0
  28. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/flowservice.py +0 -0
  29. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/hygiene.py +0 -0
  30. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/identity.py +0 -0
  31. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/ingestion.py +0 -0
  32. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/observability.py +0 -0
  33. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/policy.py +0 -0
  34. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/privacyservice.py +0 -0
  35. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/sandboxes.py +0 -0
  36. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/schema.py +0 -0
  37. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/schemamanager.py +0 -0
  38. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/segmentation.py +0 -0
  39. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/sensei.py +0 -0
  40. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/som.py +0 -0
  41. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/tags.py +0 -0
  42. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/utils.py +0 -0
  43. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp.egg-info/SOURCES.txt +0 -0
  44. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp.egg-info/dependency_links.txt +0 -0
  45. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp.egg-info/requires.txt +0 -0
  46. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp.egg-info/top_level.txt +0 -0
  47. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/setup.cfg +0 -0
  48. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/setup.py +0 -0
  49. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/__init__.py +0 -0
  50. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/catalog_test.py +0 -0
  51. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/dataaccess_test.py +0 -0
  52. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/datasets_test.py +0 -0
  53. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/destinationinstanceservice_test.py +0 -0
  54. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/exportDatasetToDatalandingZone_test.py +0 -0
  55. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/flowservice_test.py +0 -0
  56. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/schema_test.py +0 -0
  57. {aepp-0.4.1.post2 → aepp-0.4.2.post1}/tests/som_test.py +0 -0
{aepp-0.4.1.post2/aepp.egg-info → aepp-0.4.2.post1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aepp
- Version: 0.4.1.post2
+ Version: 0.4.2.post1
  Summary: Package to manage AEP API endpoint and some helper functions
  Home-page: https://github.com/adobe/aepp
  Author: Julien Piccini
{aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/__init__.py
@@ -153,14 +153,15 @@ def extractSandboxArtefacts(
  completePath = mypath / f'{sandbox.sandbox}'
  else:
  completePath = Path(localFolder)
- from aepp import schema, catalog, identity
+ from aepp import schema, catalog, identity,customerprofile, segmentation
  sch = schema.Schema(config=sandbox)
  cat = catalog.Catalog(config=sandbox)
  ide = identity.Identity(config=sandbox,region=region)
  completePath.mkdir(exist_ok=True)
  globalConfig = {
  "imsOrgId":sandbox.org_id,
- "tenantId":f"_{sch.getTenantId()}"
+ "tenantId":f"_{sch.getTenantId()}",
+ "sandbox":sandbox.sandbox
  }
  with open(f'{completePath}/config.json','w') as f:
  json.dump(globalConfig,f,indent=2)
@@ -184,6 +185,10 @@ def extractSandboxArtefacts(
  identityPath.mkdir(exist_ok=True)
  datasetPath = completePath / 'dataset'
  datasetPath.mkdir(exist_ok=True)
+ mergePolicyPath = completePath / 'mergePolicy'
+ mergePolicyPath.mkdir(exist_ok=True)
+ audiencePath = completePath / 'audience'
+ audiencePath.mkdir(exist_ok=True)
  myclasses = sch.getClasses()
  classesGlobal = sch.getClassesGlobal()
  behaviors = sch.getBehaviors()
@@ -257,6 +262,19 @@ def extractSandboxArtefacts(
  for el in identities:
  with open(f"{identityPath / el['code']}.json",'w') as f:
  json.dump(el,f,indent=2)
+ ## merge policies
+ ups = customerprofile.Profile(config=sandbox)
+ mymergePolicies = ups.getMergePolicies()
+ for el in mymergePolicies:
+ with open(f"{mergePolicyPath / el.get('id','unknown')}.json",'w') as f:
+ json.dump(el,f,indent=2)
+ ## audiences
+ mysegmentation = segmentation.Segmentation(config=sandbox)
+ audiences = mysegmentation.getAudiences()
+ for el in audiences:
+ safe_name = __titleSafe__(el.get('name','unknown'))
+ with open(f"{audiencePath / safe_name}.json",'w') as f:
+ json.dump(el,f,indent=2)

  def extractSandboxArtefact(
  sandbox: 'ConnectObject' = None,
@@ -271,7 +289,7 @@ def extractSandboxArtefact(
  sandbox: REQUIRED: the instance of a ConnectObject that contains the sandbox information and connection.
  localFolder: OPTIONAL: the local folder where to extract the sandbox. If not provided, it will use the current working directory and name the folder the name of the sandbox.
  artefact: REQUIRED: the id or the name of the artefact to export.
- artefactType: REQUIRED: the type of artefact to export. Possible values are: 'class','schema','fieldgroup','datatype','descriptor','dataset','identity'
+ artefactType: REQUIRED: the type of artefact to export. Possible values are: 'class','schema','fieldgroup','datatype','descriptor','dataset','identity','mergepolicy'
  region: OPTIONAL: the region of the sandbox (default: nld2). This is used to fetch the correct API endpoints for the identities.
  Possible values: "va7","aus5", "can2", "ind2"
  """
@@ -287,7 +305,8 @@ def extractSandboxArtefact(
  sch = schema.Schema(config=sandbox)
  globalConfig = {
  "imsOrgId":sandbox.org_id,
- "tenantId":f"_{sch.getTenantId()}"
+ "tenantId":f"_{sch.getTenantId()}",
+ "sandbox":sandbox.sandbox
  }
  with open(f'{completePath}/config.json','w') as f:
  json.dump(globalConfig,f,indent=2)
@@ -307,6 +326,10 @@ def extractSandboxArtefact(
  __extractDataset__(artefact,completePath,sandbox,region)
  elif artefactType == 'identity':
  __extractIdentity__(artefact,region,completePath,sandbox)
+ elif artefactType == 'mergepolicy':
+ __extractMergePolicy__(artefact,completePath,sandbox)
+ elif artefactType == 'audience':
+ __extractAudience__(artefact,completePath,sandbox)
  else:
  raise ValueError("artefactType not recognized")

@@ -472,4 +495,29 @@ def __extractDataset__(dataset: str,folder: Union[str, Path] = None,sandbox: 'Co
  json.dump(myDataset,f,indent=2)
  schema = myDataset.get('schemaRef',{}).get('id',None)
  if schema is not None:
- __extractSchema__(schema,folder,sandbox,region)
+ __extractSchema__(schema,folder,sandbox,region)
+
+ def __extractMergePolicy__(mergePolicy: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None,region:str=None):
+ from aepp import customerprofile
+ ups = customerprofile.Profile(config=sandbox)
+ mymergePolicies = ups.getMergePolicies()
+ mymergePolicy = [el for el in mymergePolicies if el.get('id','') == mergePolicy or el.get('name','') == mergePolicy][0]
+ if mymergePolicy['attributeMerge'].get('type','timestampOrdered') == 'dataSetPrecedence':
+ list_ds = mymergePolicy['attributeMerge'].get('order',[])
+ for ds in list_ds:
+ __extractDataset__(ds,folder,sandbox,region)
+ mergePolicyPath = Path(folder) / 'mergePolicy'
+ mergePolicyPath.mkdir(exist_ok=True)
+ with open(f"{mergePolicyPath / mymergePolicy.get('id','unknown')}.json",'w') as f:
+ json.dump(mymergePolicy,f,indent=2)
+
+ def __extractAudience__(audienceName: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None):
+ from aepp import segmentation
+ mysegmentation = segmentation.Segmentation(config=sandbox)
+ audiences = mysegmentation.getAudiences()
+ myaudience = [el for el in audiences if el.get('name','') == audienceName or el.get('id','') == audienceName][0]
+ audiencePath = Path(folder) / 'audience'
+ audiencePath.mkdir(exist_ok=True)
+ safe_name = __titleSafe__(myaudience.get('name','unknown'))
+ with open(f"{audiencePath / safe_name}.json",'w') as f:
+ json.dump(myaudience,f,indent=2)
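The __init__.py changes above extend sandbox extraction to merge policies and audiences, and record the sandbox name in config.json. A minimal usage sketch follows; the credential values are placeholders, and the aepp.configure() keywords are the ones visible in the synchronizer.py hunk further down:

import aepp

# Placeholder credentials; connectInstance=True returns the ConnectObject
# that the extraction helpers expect.
mysandbox = aepp.configure(
    org_id="MYORG@AdobeOrg",      # placeholder
    client_id="myClientId",       # placeholder
    secret="mySecret",            # placeholder
    scopes="myScopes",            # placeholder
    sandbox="dev-sandbox",
    connectInstance=True,
)

# Full extraction now also writes mergePolicy/ and audience/ folders,
# and config.json now carries the "sandbox" key.
aepp.extractSandboxArtefacts(sandbox=mysandbox, region="nld2")

# Single-artefact export, using one of the artefact types added here.
aepp.extractSandboxArtefact(
    sandbox=mysandbox,
    artefact="My Merge Policy",   # id or name, per the docstring
    artefactType="mergepolicy",
)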
aepp-0.4.2.post1/aepp/__version__.py
@@ -0,0 +1 @@
+ __version__ = "0.4.2-1"
{aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/queryservice.py
@@ -706,27 +706,35 @@ class QueryService:
  res = self.connector.deleteData(self.endpoint + path)
  return res

- def createQueryTemplate(self, queryData: dict = None) -> dict:
+ def createQueryTemplate(self, queryData: dict = None,name: str = None,sql: str = None,queryParameters: dict = None) -> dict:
  """
  Create a query template based on the dictionary passed.
  Arguments:
- queryData : REQUIED : An object that contains "sql", "queryParameter" and "name" keys.
- more info : https://www.adobe.io/apis/experienceplatform/home/api-reference.html#/Query-Templates/create_query_template
+ queryData : REQUIED : An object that contains "sql" and "name" keys.
+ name : OPTIONAL : Name of the template
+ sql : OPTIONAL : SQL query as a string.
+ queryParameters : OPTIONAL : in case you are using template, providing the paramter in a dictionary.
+ more info : https://developer.adobe.com/experience-platform-apis/references/query-service/#tag/Query-Templates
  """
  path = "/query-templates"
  if self.loggingEnabled:
  self.logger.debug(f"Starting createTemplate")
- if isinstance(queryData, dict):
- if (
- "sql" not in queryData.keys()
- or "queryParameters" not in queryData.keys()
- or "name" not in queryData.keys()
- ):
- raise KeyError(
- "Minimum key value are not respected.\nPlease see here for more info :\nhttps://www.adobe.io/apis/experienceplatform/home/api-reference.html#/Query-Templates/create_query_template "
- )
+ if queryData is not None:
+ if isinstance(queryData, dict):
+ if ("sql" not in queryData.keys() or "name" not in queryData.keys()):
+ raise KeyError(
+ "Minimum key value are not respected.\nPlease see here for more info :\nhttps://www.adobe.io/apis/experienceplatform/home/api-reference.html#/Query-Templates/create_query_template "
+ )
+ else:
+ raise Exception("expected a dictionary for queryData")
  else:
- raise Exception("expected a dictionary for queryData")
+ if name is None or sql is None:
+ raise Exception(
+ "Either queryData dictionary or name and sql parameters are required."
+ )
+ queryData = {"name": name, "sql": sql}
+ if queryParameters is not None:
+ queryData["queryParameters"] = queryParameters
  res = self.connector.postData(
  self.endpoint + path, headers=self.header, data=queryData
  )
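This change relaxes createQueryTemplate(): "queryParameters" is no longer a mandatory key, and the template can be described either with a queryData dictionary or with the new name/sql/queryParameters keyword arguments. A sketch of both call styles, reusing the ConnectObject from the earlier sketch and assuming QueryService accepts the same config= keyword as the other service classes in this diff:

from aepp import queryservice

qs = queryservice.QueryService(config=mysandbox)

# Dictionary style: "name" and "sql" are now the only required keys.
tpl1 = qs.createQueryTemplate(
    queryData={"name": "dailyCount", "sql": "SELECT COUNT(*) FROM my_table"}
)

# Keyword style added in this release; queryParameters stays optional.
tpl2 = qs.createQueryTemplate(
    name="dailyCountByDay",
    sql="SELECT COUNT(*) FROM my_table WHERE day = $day",
    queryParameters={"day": "2024-01-01"},
)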
{aepp-0.4.1.post2 → aepp-0.4.2.post1}/aepp/synchronizer.py
@@ -10,7 +10,7 @@

  import json
  import aepp
- from aepp import schema, schemamanager, fieldgroupmanager, datatypemanager,classmanager,identity,catalog
+ from aepp import schema, schemamanager, fieldgroupmanager, datatypemanager,classmanager,identity,catalog,customerprofile,segmentation
  from copy import deepcopy
  from typing import Union
  from pathlib import Path
@@ -65,21 +65,83 @@ class Synchronizer:
  self.identityFolder = self.localfolder / 'identity'
  self.datasetFolder = self.localfolder / 'dataset'
  self.descriptorFolder = self.localfolder / 'descriptor'
+ self.mergePolicyFolder = self.localfolder / 'mergepolicy'
+ self.audienceFolder = self.localfolder / 'audience'
  if baseSandbox is not None:
  self.baseSandbox = baseSandbox
+ else:
+ with open(self.localfolder / 'config.json','r') as f:
+ local_config = json.load(f)
+ self.baseSandbox = local_config.get('sandbox',None)
  self.dict_targetsConfig = {target: aepp.configure(org_id=config_object['org_id'],client_id=config_object['client_id'],scopes=config_object['scopes'],secret=config_object['secret'],sandbox=target,connectInstance=True) for target in targets}
  self.region = region
- self.dict_targetComponents = {target:{'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{}} for target in targets}
+ self.dict_baseComponents = {'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}}
+ self.dict_targetComponents = {target:{'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}} for target in targets}

- def syncComponent(self,component:Union[str,dict],componentType:str=None,verbose:bool=False)-> dict:
+ def getSyncFieldGroupManager(self,fieldgroup:str,sandbox:str=None)-> dict:
+ """
+ Get a field group Manager from the synchronizer.
+ It searches through the component cache to see if the FieldGroupManager for the target sandbox is already instantiated.
+ If not, it generate an error.
+ Arguments:
+ fieldgroup : REQUIRED : Either $id, or name or alt:Id of the field group to get
+ sandbox : REQUIRED : name of the sandbox to get the field group from
+ """
+ if sandbox is None:
+ raise ValueError("a sandbox name must be provided")
+ if sandbox == self.baseSandbox:
+ if fieldgroup in self.dict_baseComponents['fieldgroup'].keys():
+ return self.dict_baseComponents['fieldgroup'][fieldgroup]
+ elif fieldgroup in [self.dict_baseComponents['fieldgroup'][fg].id for fg in self.dict_baseComponents['fieldgroup'].keys()]:
+ fg_key = [fg for fg in self.dict_baseComponents['fieldgroup'].keys() if self.dict_baseComponents['fieldgroup'][fg].id == fieldgroup][0]
+ return self.dict_baseComponents['fieldgroup'][fg_key]
+ elif fieldgroup in [self.dict_baseComponents['fieldgroup'][fg].altId for fg in self.dict_baseComponents['fieldgroup'].keys()]:
+ fg_key = [fg for fg in self.dict_baseComponents['fieldgroup'].keys() if self.dict_baseComponents['fieldgroup'][fg].altId == fieldgroup][0]
+ return self.dict_baseComponents['fieldgroup'][fg_key]
+ else:
+ raise ValueError(f"the field group '{fieldgroup}' has not been synchronized to the sandbox '{sandbox}'")
+ else:
+ if fieldgroup in self.dict_targetComponents[sandbox]['fieldgroup'].keys():
+ return self.dict_targetComponents[sandbox]['fieldgroup'][fieldgroup]
+ elif fieldgroup in [self.dict_targetComponents[sandbox]['fieldgroup'][fg].id for fg in self.dict_targetComponents[sandbox]['fieldgroup'].keys()]:
+ fg_key = [fg for fg in self.dict_targetComponents[sandbox]['fieldgroup'].keys() if self.dict_targetComponents[sandbox]['fieldgroup'][fg].id == fieldgroup][0]
+ return self.dict_targetComponents[sandbox]['fieldgroup'][fg_key]
+ elif fieldgroup in [self.dict_targetComponents[sandbox]['fieldgroup'][fg].altId for fg in self.dict_targetComponents[sandbox]['fieldgroup'].keys()]:
+ fg_key = [fg for fg in self.dict_targetComponents[sandbox]['fieldgroup'].keys() if self.dict_targetComponents[sandbox]['fieldgroup'][fg].altId == fieldgroup][0]
+ return self.dict_targetComponents[sandbox]['fieldgroup'][fg_key]
+ else:
+ raise ValueError(f"the field group '{fieldgroup}' has not been synchronized to the sandbox '{sandbox}'")
+
+ def getDatasetName(self,datasetId:str,sandbox:str=None)-> str:
+ """
+ Get a dataset name from the synchronizer base on the ID of the dataset.
+ Arguments:
+ datasetId : REQUIRED : id of the dataset to get
+ sandbox : REQUIRED : name of the sandbox to get the dataset from
+ """
+ if sandbox is None:
+ raise ValueError("a sandbox name must be provided")
+ if sandbox == self.baseSandbox:
+ if datasetId in [item.get('id') for key,item in self.dict_baseComponents['datasets'].items()]:
+ return [key for key,item in self.dict_baseComponents['datasets'].items() if item.get('id') == datasetId][0]
+ else:
+ raise ValueError(f"the dataset '{datasetId}' has not been synchronized to the sandbox '{sandbox}'")
+ else:
+ if datasetId in [item.get('id') for key,item in self.dict_targetComponents[sandbox]['datasets'].items()]:
+ return [key for key,item in self.dict_targetComponents[sandbox]['datasets'].items() if item.get('id') == datasetId][0]
+ else:
+ raise ValueError(f"the dataset '{datasetId}' has not been synchronized to the sandbox '{sandbox}'")
+
+ def syncComponent(self,component:Union[str,dict],componentType:str=None,force:bool=False,verbose:bool=False)-> dict:
  """
  Synchronize a component to the target sandbox.
  The component could be a string (name or id of the component in the base sandbox) or a dictionary with the definition of the component.
  If the component is a string, you have to have provided a base sandbox in the constructor.
  Arguments:
  component : REQUIRED : name or id of the component or a dictionary with the component definition
- componentType : OPTIONAL : type of the component (e.g. "schema", "fieldgroup", "datatypes", "class", "identity", "dataset"). Required if a string is passed.
- It is not required but if the type cannot be inferred from the component, it will raise an error.
+ componentType : OPTIONAL : type of the component (e.g. "schema", "fieldgroup", "datatypes", "class", "identity", "dataset", "mergepolicy", "audience"). Required if a string is passed.
+ It is not required but if the type cannot be inferred from the component, it will raise an error.
+ force : OPTIONAL : if True, it will force the synchronization of the component even if it already exists in the target sandbox. Works for Schema, FieldGroup, DataType and Class.
  verbose : OPTIONAL : if True, it will print the details of the synchronization process
  """
  if type(component) == str:
@@ -87,8 +149,8 @@ class Synchronizer:
  raise ValueError("a base sandbox or a local folder must be provided to synchronize a component by name or id")
  if componentType is None:
  raise ValueError("the type of the component must be provided if the component is a string")
- if componentType not in ['schema', 'fieldgroup', 'datatypes', 'class', 'identity', 'dataset']:
- raise ValueError("the type of the component is not supported. Please provide one of the following types: schema, fieldgroup, datatypes, class, identity, dataset")
+ if componentType not in ['schema', 'fieldgroup', 'datatypes', 'class', 'identity', 'dataset', 'mergepolicy', 'audience']:
+ raise ValueError("the type of the component is not supported. Please provide one of the following types: schema, fieldgroup, datatypes, class, identity, dataset, mergepolicy, audience")
  if componentType in ['schema', 'fieldgroup', 'datatypes', 'class']:
  if self.baseConfig is not None:
  base_schema = schema.Schema(config=self.baseConfig)
@@ -165,6 +227,30 @@ class Synchronizer:
  break
  if len(component) == 1: ## if the component is the catalog API response {'key': {dataset definition}}
  component = component[list(component.keys())[0]] ## accessing the real dataset definition
+ elif componentType == "mergepolicy":
+ if self.baseConfig is not None:
+ ups_base = customerprofile.Profile(config=self.baseConfig)
+ base_mergePolicies = ups_base.getMergePolicies()
+ if component in [el.get('id','') for el in base_mergePolicies] or component in [el.get('name','') for el in base_mergePolicies]:
+ component = [el for el in base_mergePolicies if el.get('id','') == component or el.get('name','') == component][0]
+ elif self.localfolder is not None:
+ for file in self.mergePolicyFolder.glob('*.json'):
+ mp_file = json.load(FileIO(file))
+ if mp_file.get('id','') == component or mp_file.get('name','') == component:
+ component = mp_file
+ break
+ elif componentType == 'audience':
+ if self.baseConfig is not None:
+ seg_base = segmentation.Segmentation(config=self.baseConfig)
+ base_audiences = seg_base.getAudiences()
+ if component in [el.get('id','') for el in base_audiences] or component in [el.get('name','') for el in base_audiences]:
+ component = [el for el in base_audiences if el.get('id','') == component or el.get('name','') == component][0]
+ elif self.localfolder is not None:
+ for file in self.audienceFolder.glob('*.json'):
+ au_file = json.load(FileIO(file))
+ if au_file.get('id','') == component or au_file.get('name','') == component:
+ component = au_file
+ break
  elif type(component) == dict:
  if 'meta:resourceType' in component.keys():
  componentType = component['meta:resourceType']
@@ -180,30 +266,41 @@ class Synchronizer:
  componentType = 'identity'
  elif 'files' in component.keys():
  componentType = 'dataset'
+ elif 'attributeMerge' in component.keys():
+ componentType = 'mergepolicy'
+ elif 'expression' in component.keys():
+ componentType = 'audience'
  else:
  raise TypeError("the component type could not be inferred from the component or is not supported. Please provide the type as a parameter")
  ## Synchronize the component to the target sandboxes
  if componentType == 'datatypes':
- self.__syncDataType__(component,verbose=verbose)
+ self.__syncDataType__(component,verbose=verbose,force=force)
  if componentType == 'fieldgroup':
- self.__syncFieldGroup__(component,verbose=verbose)
+ self.__syncFieldGroup__(component,verbose=verbose,force=force)
  if componentType == 'schema':
- self.__syncSchema__(component,verbose=verbose)
+ self.__syncSchema__(component,verbose=verbose,force=force)
  if componentType == 'class':
- self.__syncClass__(component,verbose=verbose)
+ self.__syncClass__(component,verbose=verbose,force=force)
  if componentType == 'identity':
  self.__syncIdentity__(component,verbose=verbose)
  if componentType == 'dataset':
  self.__syncDataset__(component,verbose=verbose)
+ if componentType == 'mergepolicy':
+ self.__syncMergePolicy__(component,verbose=verbose)
+ if componentType == 'audience':
+ self.__syncAudience__(component,verbose=verbose)

- def __syncClass__(self,baseClass:'ClassManager',verbose:bool=False)-> dict:
+
+ def __syncClass__(self,baseClass:'ClassManager',force:bool=False,verbose:bool=False)-> dict:
  """
  Synchronize a class to the target sandboxes.
  Arguments:
  baseClass : REQUIRED : class id or name to synchronize
+ force : OPTIONAL : if True, it will force the synchronization of the class even if it already exists in the target sandbox
  """
  if not isinstance(baseClass,classmanager.ClassManager):
  raise TypeError("the baseClass must be a classManager instance")
+ self.dict_baseComponents['class'][baseClass.title] = baseClass
  baseClassName = baseClass.title
  baseBehavior = baseClass.behavior
  for target in self.dict_targetsConfig.keys():
@@ -223,14 +320,16 @@ class Synchronizer:
  self.dict_targetComponents[target]['class'][baseClassName] = t_newClass


- def __syncDataType__(self,baseDataType:'DataTypeManager',verbose:bool=False)-> dict:
+ def __syncDataType__(self,baseDataType:'DataTypeManager',force:bool=False,verbose:bool=False)-> dict:
  """
  Synchronize a data type to the target sandbox.
  Arguments:
  baseDataType : REQUIRED : DataTypeManager object with the data type to synchronize
+ force : OPTIONAL : if True, it will force the synchronization of the data type even if it already exists in the target sandbox
  """
  if not isinstance(baseDataType,datatypemanager.DataTypeManager):
  raise TypeError("the baseDataType must be a DataTypeManager object")
+ self.dict_baseComponents['datatype'][baseDataType.title] = baseDataType
  name_base_datatype = baseDataType.title
  for target in self.dict_targetsConfig.keys():
  targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
@@ -249,7 +348,7 @@ class Synchronizer:
  base_paths = df_base['path'].tolist()
  target_paths = df_target['path'].tolist()
  diff_paths = list(set(base_paths) - set(target_paths))
- if len(diff_paths) > 0: ## there are differences
+ if len(diff_paths) > 0 or force==True: ## there are differences
  base_datatypes_paths = baseDataType.getDataTypePaths()
  df_base_limited = df_base[df_base['origin'] == 'self'].copy() ## exclude field group native fields
  df_base_limited = df_base_limited[~df_base_limited['path'].isin(list(base_datatypes_paths.keys()))] ## exclude base of datatype rows
@@ -258,7 +357,7 @@ class Synchronizer:
  base_dict_path_dtTitle = {}
  for path,dt_id in base_datatypes_paths.items():
  tmp_dt_manager = baseDataType.getDataTypeManager(dt_id)
- self.__syncDataType__(tmp_dt_manager,verbose=verbose)
+ self.__syncDataType__(tmp_dt_manager,force=force,verbose=verbose)
  base_dict_path_dtTitle[path] = tmp_dt_manager.title
  target_datatypes_paths = t_datatype.getDataTypePaths(som_compatible=True)
  target_datatypes_paths_list = list(target_datatypes_paths.keys())
@@ -289,7 +388,7 @@ class Synchronizer:
  base_dict_path_dtTitle = {}
  for path,dt_id in base_datatypes_paths.items():
  tmp_dt_manager = baseDataType.getDataTypeManager(dt_id)
- self.__syncDataType__(tmp_dt_manager,verbose=verbose)
+ self.__syncDataType__(tmp_dt_manager,force=force,verbose=verbose)
  base_dict_path_dtTitle[path] = tmp_dt_manager.title
  target_datatypes_paths = new_datatype.getDataTypePaths(som_compatible=True)
  target_datatypes_paths_list = list(target_datatypes_paths.keys())
@@ -309,14 +408,16 @@ class Synchronizer:
  raise Exception("the data type could not be created in the target sandbox")
  self.dict_targetComponents[target]['datatype'][name_base_datatype] = t_datatype

- def __syncFieldGroup__(self,baseFieldGroup:'FieldGroupManager',verbose:bool=False)-> dict:
+ def __syncFieldGroup__(self,baseFieldGroup:'FieldGroupManager',force:bool=True,verbose:bool=False)-> dict:
  """
  Synchronize a field group to the target sandboxes.
  Argument:
  baseFieldGroup : REQUIRED : FieldGroupManager object with the field group to synchronize
+ force : OPTIONAL : if True, it will force the synchronization of the field group even if it already exists in the target sandbox
  """
  if not isinstance(baseFieldGroup,fieldgroupmanager.FieldGroupManager):
  raise TypeError("the baseFieldGroup must be a FieldGroupManager object")
+ self.dict_baseComponents['fieldgroup'][baseFieldGroup.title] = baseFieldGroup
  name_base_fieldgroup = baseFieldGroup.title
  base_fg_classIds = baseFieldGroup.classIds
  for target in self.dict_targetsConfig.keys():
@@ -350,7 +451,7 @@ class Synchronizer:
  base_paths = df_base['path'].tolist()
  target_paths = df_target['path'].tolist()
  diff_paths = [path for path in base_paths if path not in target_paths]
- if len(diff_paths) > 0:
+ if len(diff_paths) > 0 or force==True:
  base_datatypes_paths = baseFieldGroup.getDataTypePaths()
  ## handling fieldgroup native fields
  df_base_limited = df_base[df_base['origin'] == 'fieldGroup'].copy() ## exclude datatypes
@@ -371,7 +472,7 @@ class Synchronizer:
  base_dict_path_dtTitle = {}
  for path,dt_id in base_datatypes_paths.items():
  tmp_dt_manager = baseFieldGroup.getDataTypeManager(dt_id)
- self.__syncDataType__(tmp_dt_manager,verbose=verbose)
+ self.__syncDataType__(tmp_dt_manager,force=force,verbose=verbose)
  base_dict_path_dtTitle[path] = tmp_dt_manager.title
  target_datatypes_paths = t_fieldgroup.getDataTypePaths(som_compatible=True)
  target_datatypes_paths_list = list(target_datatypes_paths.keys())
@@ -423,7 +524,7 @@ class Synchronizer:
  base_dict_path_dtTitle = {}
  for path,dt_id in base_datatypes_paths.items():
  tmp_dt_manager = baseFieldGroup.getDataTypeManager(dt_id)
- self.__syncDataType__(tmp_dt_manager,verbose=verbose)
+ self.__syncDataType__(tmp_dt_manager,force=force,verbose=verbose)
  base_dict_path_dtTitle[path] = tmp_dt_manager.title
  for path,dt_title in base_dict_path_dtTitle.items():
  tmp_t_dt = self.dict_targetComponents[target]['datatype'][dt_title]
@@ -442,16 +543,18 @@ class Synchronizer:
  self.dict_targetComponents[target]['fieldgroup'][name_base_fieldgroup] = t_fieldgroup


- def __syncSchema__(self,baseSchema:'SchemaManager',verbose:bool=False)-> dict:
+ def __syncSchema__(self,baseSchema:'SchemaManager',force:bool=False,verbose:bool=False)-> dict:
  """
  Sync the schema to the target sandboxes.
  Arguments:
  baseSchema : REQUIRED : SchemaManager object to synchronize
+ force : OPTIONAL : if True, it will force the synchronization of field groups even if they already exist in the target schema
  """
  ## TO DO -> sync required fields
  if not isinstance(baseSchema,schemamanager.SchemaManager):
  raise TypeError("the baseSchema must be a SchemaManager object")
  name_base_schema = baseSchema.title
+ self.dict_baseComponents['schema'][name_base_schema] = baseSchema
  descriptors = baseSchema.getDescriptors()
  base_field_groups_names = list(baseSchema.fieldGroups.values())
  dict_base_fg_name_id = {name:fg_id for fg_id,name in baseSchema.fieldGroups.items()}
@@ -465,7 +568,7 @@ class Synchronizer:
  t_schema = schemamanager.SchemaManager(targetSchemaAPI.data.schemas_altId[name_base_schema],config=self.dict_targetsConfig[target],sandbox=target)
  new_fieldgroups = [fg for fg in base_field_groups_names if fg not in t_schema.fieldGroups.values()]
  existing_fieldgroups = [fg for fg in base_field_groups_names if fg in t_schema.fieldGroups.values()]
- if len(new_fieldgroups) > 0: ## if new field groups
+ if len(new_fieldgroups) > 0 or force==True: ## if new field groups
  if verbose:
  print('found new field groups to add to the schema')
  for new_fieldgroup in new_fieldgroups:
@@ -478,7 +581,8 @@ class Synchronizer:
  if verbose:
  print(f"field group '{new_fieldgroup}' is a custom field group, syncing it")
  tmp_FieldGroup = baseSchema.getFieldGroupManager(new_fieldgroup)
- self.__syncFieldGroup__(tmp_FieldGroup,verbose=verbose)
+ print(f"Creating new custom field group '{tmp_FieldGroup.title}'")
+ self.__syncFieldGroup__(tmp_FieldGroup,verbose=verbose,force=force)
  t_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][new_fieldgroup].id)
  t_schema.setDescription(baseSchema.description)
  res = t_schema.updateSchema()
@@ -490,13 +594,35 @@ class Synchronizer:
  for fg_name in existing_fieldgroups:
  if baseSchema.tenantId[1:] in dict_base_fg_name_id[fg_name]: ## custom field group
  tmp_fieldGroupManager = fieldgroupmanager.FieldGroupManager(dict_base_fg_name_id[fg_name],config=self.baseConfig,sandbox=target,localFolder=self.localfolder)
- self.__syncFieldGroup__(tmp_fieldGroupManager,verbose=verbose)
+ self.__syncFieldGroup__(tmp_fieldGroupManager,force=force,verbose=verbose)
  else:
  if verbose:
  print(f"field group '{fg_name}' is a OOTB field group, using it")
  self.dict_targetComponents[target]['fieldgroup'][fg_name] = fieldgroupmanager.FieldGroupManager(dict_base_fg_name_id[fg_name],config=self.dict_targetsConfig[target],sandbox=target)
  list_new_descriptors = self.__syncDescriptor__(baseSchema,t_schema,targetSchemaAPI=targetSchemaAPI,verbose=verbose)
+ ## handling the meta:refProperty setup if any
+ base_allOf = baseSchema.schema.get('allOf',[])
+ base_fg_name_metaref = {}
+ for refEl in base_allOf: ## retrieving the meta:refProperty from the base schema
+ if 'meta:refProperty' in refEl.keys():
+ tmp_base_fg_id = refEl['$ref']
+ if baseSchema.tenantId[1:] in tmp_base_fg_id:
+ tmp_base_fg_manager = self.getSyncFieldGroupManager(tmp_base_fg_id,sandbox=baseSchema.sandbox)
+ base_fg_name_metaref[tmp_base_fg_manager.title] = refEl['meta:refProperty']
+ else:
+ base_fg_name_metaref[tmp_base_fg_id] = refEl['meta:refProperty']
+ for fg_name,ref_property in base_fg_name_metaref.items(): ## updating the target schema with the meta:refProperty
+ for ref in t_schema.schema.get('allOf',[]):
+ tmp_target_fg_id = ref['$ref']
+ if baseSchema.tenantId[1:] in tmp_target_fg_id:
+ tmp_target_fg_manager = self.getSyncFieldGroupManager(tmp_target_fg_id,sandbox=target)
+ if fg_name == tmp_target_fg_manager.title:
+ ref['meta:refProperty'] = ref_property
+ else:
+ if fg_name == ref['$ref']:
+ ref['meta:refProperty'] = ref_property
  self.dict_targetComponents[target]['schemaDescriptors'][name_base_schema] = list_new_descriptors
+ t_schema.updateSchema()
  else: ## schema does not exist in target
  if verbose:
  print(f"schema '{name_base_schema}' does not exist in target {target}, creating it")
@@ -506,7 +632,7 @@ class Synchronizer:
  tenantidId = baseSchema.tenantId
  if tenantidId[1:] in baseClassId: ## custom class
  baseClassManager = classmanager.ClassManager(baseClassId,config=self.baseConfig,sandbox=target,localFolder=self.localfolder,sandboxBase=self.baseSandbox,tenantidId=tenantidId)
- self.__syncClass__(baseClassManager,verbose=verbose)
+ self.__syncClass__(baseClassManager,force=force,verbose=verbose)
  targetClassManager = self.dict_targetComponents[target]['class'][baseClassManager.title]
  classId_toUse = targetClassManager.id
  else:
@@ -523,7 +649,7 @@ class Synchronizer:
  if verbose:
  print(f"field group '{fg_name}' is a custom field group, using it")
  tmp_FieldGroup = baseSchema.getFieldGroupManager(fg_name)
- self.__syncFieldGroup__(tmp_FieldGroup,verbose=verbose)
+ self.__syncFieldGroup__(tmp_FieldGroup,force=force,verbose=verbose)
  new_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][fg_name].id)
  new_schema.setDescription(baseSchema.description)
  res = new_schema.createSchema()
@@ -535,6 +661,28 @@ class Synchronizer:
  ## handling descriptors
  list_new_descriptors = self.__syncDescriptor__(baseSchema,t_schema,targetSchemaAPI,verbose=verbose)
  self.dict_targetComponents[target]['schemaDescriptors'][name_base_schema] = list_new_descriptors
+ ## handling the meta:refProperty setup if any
+ base_allOf = baseSchema.schema.get('allOf',[])
+ base_fg_name_metaref = {}
+ for refEl in base_allOf: ## retrieving the meta:refProperty from the base schema
+ if 'meta:refProperty' in refEl.keys():
+ tmp_base_fg_id = refEl['$ref']
+ if baseSchema.tenantId[1:] in tmp_base_fg_id:
+ tmp_base_fg_manager = self.getSyncFieldGroupManager(tmp_base_fg_id,sandbox=baseSchema.sandbox)
+ base_fg_name_metaref[tmp_base_fg_manager.title] = refEl['meta:refProperty']
+ else:
+ base_fg_name_metaref[tmp_base_fg_id] = refEl['meta:refProperty']
+ for fg_name,ref_property in base_fg_name_metaref.items(): ## updating the target schema with the meta:refProperty
+ for ref in t_schema.schema.get('allOf',[]):
+ tmp_target_fg_id = ref['$ref']
+ if baseSchema.tenantId[1:] in tmp_target_fg_id:
+ tmp_target_fg_manager = self.getSyncFieldGroupManager(tmp_target_fg_id,sandbox=target)
+ if fg_name == tmp_target_fg_manager.title:
+ ref['meta:refProperty'] = ref_property
+ else:
+ if fg_name == ref['$ref']:
+ ref['meta:refProperty'] = ref_property
+ t_schema.updateSchema()
  self.dict_targetComponents[target]['schema'][name_base_schema] = t_schema

  def __syncDescriptor__(self,baseSchemaManager:'SchemaManager'=None,targetSchemaManager:'SchemaManager'=None,targetSchemaAPI:'Schema'=None,verbose:bool=False)-> dict:
@@ -553,6 +701,7 @@ class Synchronizer:
  if not isinstance(targetSchemaManager,schemamanager.SchemaManager):
  raise TypeError("the targetSchemaManager must be a SchemaManager object")
  base_descriptors = baseSchemaManager.getDescriptors()
+ self.dict_baseComponents['schemaDescriptors'][baseSchemaManager.title] = {}
  if self.baseConfig is not None:
  baseSchemaAPI = schema.Schema(config=self.baseConfig)
  myschemas = baseSchemaAPI.getSchemas() ## to populate the data object
@@ -717,6 +866,7 @@ class Synchronizer:
  if not isinstance(identityDefiniton,dict):
  raise TypeError("the identityDefinition must be a dictionary")
  code_base_identity = identityDefiniton['code']
+ self.dict_baseComponents['identities'][code_base_identity] = identityDefiniton
  for target in self.dict_targetsConfig.keys():
  targetIdentity = identity.Identity(config=self.dict_targetsConfig[target],region=self.region)
  t_identities = targetIdentity.getIdentities()
@@ -742,7 +892,8 @@ class Synchronizer:
  baseDataset : REQUIRED : dictionary with the dataset definition
  """
  if len(baseDataset) == 1: ## if receiving the dataset as provided by the API {datasetId:{...definition}}
- baseDataset = deepcopy(baseDataset[list(baseDataset.keys()[0])])
+ baseDataset = deepcopy(baseDataset[list(baseDataset.keys())[0]])
+ self.dict_baseComponents['datasets'][baseDataset['name']] = baseDataset
  base_datasetName = baseDataset['name']
  base_dataset_related_schemaId = baseDataset['schemaRef']['id']
  if self.baseConfig is not None:
@@ -786,4 +937,100 @@ class Synchronizer:
  t_schemas = targetSchema.getSchemas()
  baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
  self.__syncSchema__(baseSchemaManager,verbose=verbose)
+ self.dict_targetComponents[target]['datasets'][base_datasetName] = t_dataset
+
+ def __syncMergePolicy__(self,mergePolicy:dict,verbose:bool=False)->None:
+ """
+ Synchronize the dataset to the target sandboxes. Mostly creating a new dataset and associated artefacts when not already created.
+ Arguments:
+ mergePolicy : REQUIRED : The merge policy dictionary to sync
+ """
+ if not isinstance(mergePolicy,dict):
+ raise TypeError("the mergePolicy must be a dictionary")
+ self.dict_baseComponents['mergePolicy'][mergePolicy.get('id','unknown')] = mergePolicy
+ mergePolicy_name = mergePolicy.get('name','unknown')
+ if mergePolicy['attributeMerge'].get('type','timestampOrdered') == 'dataSetPrecedence':
+ if verbose:
+ print(f"handling dataset precedence for merge policy '{mergePolicy_name}'")
+ print("syncing the datasets involved in the precedence order")
+ base_list_precedenceDatasets = mergePolicy['attributeMerge'].get('order',[])
+ for ds_id in base_list_precedenceDatasets:
+ res = self.syncComponent(ds_id,componentType='dataset',verbose=verbose)
+ for target in self.dict_targetsConfig.keys():
+ targetCustomerProfile = customerprofile.Profile(config=self.dict_targetsConfig[target])
+ t_mergePolicies = targetCustomerProfile.getMergePolicies()
+ if mergePolicy_name not in [el.get('name','') for el in t_mergePolicies]: ## merge policy does not exist in target
+ if verbose:
+ print(f"merge policy '{mergePolicy_name}' does not exist in target {target}, creating it")
+ mergePolicyDef = {
+ "name":mergePolicy.get('name',''),
+ "schema":mergePolicy.get('schema','_xdm.context.profile'),
+ "identityGraph":mergePolicy.get('identityGraph','pdg'),
+ "isActiveOnEdge":mergePolicy.get('isActiveOnEdge',False),
+ }
+ if mergePolicy['attributeMerge'].get('type','timestampOrdered') == 'dataSetPrecedence':
+ target_list_precedenceDatasets = []
+ for base_ds_id in mergePolicy['attributeMerge'].get('order',[]):
+ base_ds_name = self.getDatasetName(base_ds_id,sandbox=target)
+ target_ds_id = self.dict_targetComponents[target]['datasets'][base_ds_name]['id']
+ target_list_precedenceDatasets.append(target_ds_id)
+ mergePolicyDef['attributeMerge'] = {
+ "type":mergePolicy['attributeMerge'].get('type','timestampOrdered'),
+ "order":target_list_precedenceDatasets
+ }
+ else:
+ mergePolicyDef['attributeMerge'] = {'type':'timestampOrdered'}
+ res = targetCustomerProfile.createMergePolicy(mergePolicyDef)
+ if 'id' in res.keys():
+ self.dict_targetComponents[target]['mergePolicy'][res['id']] = res
+ else:
+ print(res)
+ raise Exception("the merge policy could not be created in the target sandbox")
+ else: ## merge policy already exists in target
+ if verbose:
+ print(f"merge policy '{mergePolicy_name}' already exists in target {target}, saving it")
+ self.dict_targetComponents[target]['mergePolicy'][mergePolicy_name] = [el for el in t_mergePolicies if el.get('name','') == mergePolicy_name][0]
+
+ def __syncAudience__(self,baseAudience:dict,verbose:bool=False)-> None:
+ """
+ Synchronize an audience to the target sandboxes.
+ Arguments:
+ baseAudience : REQUIRED : dictionary with the audience definition
+ """
+ if not isinstance(baseAudience,dict):
+ raise TypeError("the baseAudience must be a dictionary")
+ audience_name = baseAudience.get('name','unknown')
+ self.dict_baseComponents['audience'][audience_name] = baseAudience
+ for target in self.dict_targetsConfig.keys():
+ targetAudiences = segmentation.Segmentation(config=self.dict_targetsConfig[target])
+ t_audiences = targetAudiences.getAudiences()
+ if audience_name not in [el['name'] for el in t_audiences]: ## audience does not exist in target
+ if verbose:
+ print(f"audience '{audience_name}' does not exist in target {target}, creating it")
+ audienceDef = {
+ "name":baseAudience.get('name',''),
+ "description":baseAudience.get('description',''),
+ "type":baseAudience.get('type','SegmentDefinition'),
+ "schema":baseAudience.get('schema','_xdm.context.profile'),
+ "expression":baseAudience.get('expression',[]),
+ "ansibleDataModel":baseAudience.get('ansibleDataModel',{}),
+ "profileInstanceId":baseAudience.get('profileInstanceId',''),
+ "evaluationInfo":baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+ }
+ res = targetAudiences.createAudience(audienceDef)
+ if 'id' in res.keys():
+ self.dict_targetComponents[target]['audience'][res['id']] = res
+ else:
+ print(res)
+ raise Exception("the audience could not be created in the target sandbox")
+ else: ## audience already exists in target
+ if verbose:
+ print(f"audience '{audience_name}' already exists in target {target}, updating it")
+ t_audience = [el for el in t_audiences if el['name'] == audience_name][0]
+ t_audience['description'] = baseAudience.get('description','')
+ t_audience['expression'] = baseAudience.get('expression',[])
+ t_audience['ansibleDataModel'] = baseAudience.get('ansibleDataModel',{})
+ t_audience['evaluationInfo'] = baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+ res = targetAudiences.putAudience(t_audience['id'],t_audience)
+ self.dict_targetComponents[target]['audience'][audience_name] = res
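The synchronizer.py hunks add a force flag, a base-component cache (dict_baseComponents), the getSyncFieldGroupManager()/getDatasetName() lookups, and mergepolicy/audience as syncable component types. A minimal sketch of the new surface; the constructor keywords are inferred from the instance attributes visible above (baseSandbox, targets, region) and are not confirmed by this diff:

from aepp import synchronizer

# Hypothetical constructor call; only the attributes it populates are
# shown in the diff, not the full __init__ signature.
sync = synchronizer.Synchronizer(
    baseSandbox="dev-sandbox",
    targets=["stage-sandbox"],
    region="nld2",
)

# force=True re-pushes schemas, field groups, data types and classes even
# when they already exist in the target sandbox.
sync.syncComponent("My Schema", componentType="schema", force=True, verbose=True)

# The two component types added in this release.
sync.syncComponent("My Merge Policy", componentType="mergepolicy")
sync.syncComponent("My Audience", componentType="audience")

# New cache lookups: resolve an already-synced field group by name, $id or
# altId, or recover a dataset name from its id, per sandbox.
fg = sync.getSyncFieldGroupManager("My Field Group", sandbox="stage-sandbox")
ds_name = sync.getDatasetName("someDatasetId", sandbox="stage-sandbox")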
{aepp-0.4.1.post2 → aepp-0.4.2.post1/aepp.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: aepp
- Version: 0.4.1.post2
+ Version: 0.4.2.post1
  Summary: Package to manage AEP API endpoint and some helper functions
  Home-page: https://github.com/adobe/aepp
  Author: Julien Piccini
aepp-0.4.1.post2/aepp/__version__.py
@@ -1 +0,0 @@
- __version__ = "0.4.1-2"