aepp-0.5.2.post2-py3-none-any.whl → aepp-0.5.2.post3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aepp/__init__.py CHANGED
@@ -159,9 +159,8 @@ def __titleSafe__(text: str) -> str:
     Arguments:
         text : REQUIRED : the text to be converted
     """
-    invalid_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*', ' ']
-    for char in invalid_chars:
-        text = text.replace(char, '_')
+    valid_chars = "[^a-zA-Z0-9_\n\\.]"
+    text = re.sub(valid_chars, "_", text)
     return text
 
 
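The `__titleSafe__` rewrite replaces a blocklist of filesystem-unsafe characters with an allowlist: any character outside `a-zA-Z0-9_`, newline, and dot now becomes an underscore, so characters the old list missed (parentheses, dashes, accents) are sanitized as well. A minimal sketch of the new behaviour, assuming `re` is already imported at module level (the new code depends on it); the sample titles are made up:

```python
import re

# Allowlist pattern copied from the new __titleSafe__: everything that is
# NOT alphanumeric, underscore, newline, or dot is replaced by "_".
valid_chars = "[^a-zA-Z0-9_\n\\.]"

print(re.sub(valid_chars, "_", "My Audience: EMEA/2024"))  # My_Audience__EMEA_2024
print(re.sub(valid_chars, "_", "profile.v1 (draft)"))      # profile.v1__draft_
```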
@@ -187,7 +186,7 @@ def extractSandboxArtifacts(
         completePath = mypath / f'{sandbox.sandbox}'
     else:
         completePath = Path(localFolder)
-    from aepp import schema, catalog, identity,customerprofile, segmentation
+    from aepp import schema, catalog, identity,customerprofile, segmentation, tags
     sch = schema.Schema(config=sandbox)
     cat = catalog.Catalog(config=sandbox)
     ide = identity.Identity(config=sandbox,region=region)
@@ -223,6 +222,16 @@ def extractSandboxArtifacts(
     mergePolicyPath.mkdir(exist_ok=True)
     audiencePath = completePath / 'audience'
     audiencePath.mkdir(exist_ok=True)
+    tagPath = completePath / 'tag'
+    tagPath.mkdir(exist_ok=True)
+    ## handling tags
+    tag_manager = tags.Tags(config=sandbox)
+    all_tags = tag_manager.getTags()
+    dict_id_name = {tag['id']:tag['name'] for tag in all_tags}
+    for tag in all_tags:
+        safe_name = __titleSafe__(tag.get('name','unknown'))
+        with open(f"{tagPath / safe_name}.json",'w') as f:
+            json.dump(tag,f,indent=2)
     myclasses = sch.getClasses()
     classesGlobal = sch.getClassesGlobal()
     behaviors = sch.getBehaviors()
@@ -290,6 +299,9 @@ def extractSandboxArtifacts(
     datasets = cat.getDataSets()
     for key,value in datasets.items():
         value['id'] = key
+        if len(value.get('unifiedTags',[])) > 0:
+            tag_names = [dict_id_name.get(tag_id) for tag_id in value.get('unifiedTags',[])]
+            value['unifiedTags'] = tag_names
         with open(f"{datasetPath / value.get('tags',{}).get('adobe/pqs/table',[key])[0]}.json",'w') as f:
             json.dump(value,f,indent=2)
     identities = ide.getIdentities()
@@ -307,6 +319,9 @@ def extractSandboxArtifacts(
     audiences = mysegmentation.getAudiences()
     for el in audiences:
         safe_name = __titleSafe__(el.get('name','unknown'))
+        if len(el.get('tags',[])) > 0:
+            tag_names = [dict_id_name.get(tag_id) for tag_id in el.get('tags',[])]
+            el['tags'] = tag_names
         with open(f"{audiencePath / safe_name}.json",'w') as f:
             json.dump(el,f,indent=2)
 
@@ -345,9 +360,17 @@ def extractSandboxArtifact(
     with open(f'{completePath}/config.json','w') as f:
         json.dump(globalConfig,f,indent=2)
 
-    from aepp import schema, catalog, identity
+    from aepp import schema, catalog, tags
     sch = schema.Schema(config=sandbox)
     cat = catalog.Catalog(config=sandbox)
+    ### taking care of tas
+    tagPath = completePath / 'tag'
+    tagPath.mkdir(exist_ok=True)
+    tag_manager = tags.Tags(config=sandbox)
+    all_tags = tag_manager.getTags()
+    dict_tag_id_name = {tag['id']:tag['name'] for tag in all_tags}
+    with open(f'{tagPath}/tags.json','w') as f:
+        json.dump(all_tags,f,indent=2)
     if artifactType == 'class':
         __extractClass__(artifact,completePath,sandbox)
     elif artifactType == 'schema':
@@ -357,16 +380,16 @@ def extractSandboxArtifact(
     elif artifactType == 'datatype':
         __extractDataType__(artifact,completePath,sandbox)
     elif artifactType == 'dataset':
-        __extractDataset__(artifact,completePath,sandbox,region)
+        __extractDataset__(artifact,completePath,sandbox,region,dict_tag_id_name)
     elif artifactType == 'identity':
         __extractIdentity__(artifact,region,completePath,sandbox)
     elif artifactType == 'mergepolicy':
-        __extractMergePolicy__(artifact,completePath,sandbox)
+        __extractMergePolicy__(artifact,completePath,sandbox,dict_tag_id_name=dict_tag_id_name)
    elif artifactType == 'audience':
-        __extractAudience__(artifact,completePath,sandbox)
+        __extractAudience__(artifact,completePath,sandbox,dict_tag_id_name)
     else:
         raise ValueError("artifactType not recognized")
-
+
 def __extractClass__(classEl: str,folder: Union[str, Path] = None,sandbox: 'ConnectObject' = None):
     classPath = Path(folder) / 'class'
     classPath.mkdir(exist_ok=True)
@@ -511,7 +534,7 @@ def __extractIdentity__(identityStr: str,region:str=None,folder: Union[str, Path
     with open(f"{identityPath / file_name}.json",'w') as f:
         json.dump(myIdentity,f,indent=2)
 
-def __extractDataset__(dataset: str,folder: Union[str, Path] = None,sandbox: 'ConnectObject' = None, region:str=None):
+def __extractDataset__(dataset: str,folder: Union[str, Path] = None,sandbox: 'ConnectObject' = None, region:str=None,dict_tag_id_name: dict = None,**kwargs):
     from aepp import catalog
     cat = catalog.Catalog(config=sandbox)
     datasets = cat.getDataSets()
@@ -520,6 +543,9 @@ def __extractDataset__(dataset: str,folder: Union[str, Path] = None,sandbox: 'Co
         if key == dataset or value.get('tags',{}).get('adobe/pqs/table',[''])[0] == dataset or value.get('name','') == dataset:
             myDataset = value
             myDataset['id'] = key
+            if dict_tag_id_name is not None and len(myDataset.get('unifiedTags',[])) > 0:
+                tag_names = [dict_tag_id_name.get(tag_id) for tag_id in myDataset.get('unifiedTags',[])]
+                myDataset['unifiedTags'] = tag_names
     if myDataset is None:
         raise ValueError("Dataset not found")
     datasetPath = Path(folder) / 'dataset'
@@ -531,7 +557,7 @@ def __extractDataset__(dataset: str,folder: Union[str, Path] = None,sandbox: 'Co
     if schema is not None:
         __extractSchema__(schema,folder,sandbox,region)
 
-def __extractMergePolicy__(mergePolicy: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None,region:str=None):
+def __extractMergePolicy__(mergePolicy: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None,region:str=None,dict_tag_id_name: dict = None,**kwargs):
     from aepp import customerprofile
     ups = customerprofile.Profile(config=sandbox)
     mymergePolicies = ups.getMergePolicies()
@@ -539,13 +565,13 @@ def __extractMergePolicy__(mergePolicy: str = None,folder:Union[str, Path]=None,
     if mymergePolicy['attributeMerge'].get('type','timestampOrdered') == 'dataSetPrecedence':
         list_ds = mymergePolicy['attributeMerge'].get('order',[])
         for ds in list_ds:
-            __extractDataset__(ds,folder,sandbox,region)
+            __extractDataset__(ds,folder,sandbox,region,dict_tag_id_name=dict_tag_id_name)
     mergePolicyPath = Path(folder) / 'mergePolicy'
     mergePolicyPath.mkdir(exist_ok=True)
     with open(f"{mergePolicyPath / mymergePolicy.get('id','unknown')}.json",'w') as f:
         json.dump(mymergePolicy,f,indent=2)
 
-def __extractAudience__(audienceName: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None):
+def __extractAudience__(audienceName: str = None,folder:Union[str, Path]=None, sandbox: 'ConnectObject' = None,dict_tag_id_name: dict = None,**kwargs):
     from aepp import segmentation
     mysegmentation = segmentation.Segmentation(config=sandbox)
     audiences = mysegmentation.getAudiences()
@@ -553,5 +579,8 @@ def __extractAudience__(audienceName: str = None,folder:Union[str, Path]=None, s
     audiencePath = Path(folder) / 'audience'
     audiencePath.mkdir(exist_ok=True)
     safe_name = __titleSafe__(myaudience.get('name','unknown'))
+    if len(myaudience.get('tags',[])) > 0 and dict_tag_id_name is not None:
+        tag_names = [dict_tag_id_name.get(tag_id) for tag_id in myaudience.get('tags',[])]
+        myaudience['tags'] = tag_names
     with open(f"{audiencePath / safe_name}.json",'w') as f:
         json.dump(myaudience,f,indent=2)
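Across the extraction helpers above, the same enrichment pattern repeats: fetch all tags once, build an id-to-name lookup, then rewrite the tag-id lists on datasets (`unifiedTags`) and audiences (`tags`) into readable names before dumping the JSON files. A self-contained sketch of that substitution; the tag payloads are hypothetical, only the `id`/`name` keys mirror the code above:

```python
# Hedged sketch of the id -> name substitution used by the extract helpers.
all_tags = [
    {"id": "tag-123", "name": "marketing"},  # hypothetical tag payloads
    {"id": "tag-456", "name": "pii"},
]
dict_id_name = {tag["id"]: tag["name"] for tag in all_tags}

dataset = {"name": "web_events", "unifiedTags": ["tag-123", "tag-456"]}
if len(dataset.get("unifiedTags", [])) > 0:
    dataset["unifiedTags"] = [dict_id_name.get(t) for t in dataset["unifiedTags"]]

print(dataset["unifiedTags"])  # ['marketing', 'pii']
```

Note that `dict.get` returns `None` for ids missing from the lookup, so an unknown tag id surfaces as `null` in the extracted JSON rather than raising.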
aepp/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.5.2-2"
+__version__ = "0.5.2-3"
aepp/catalog.py CHANGED
@@ -489,6 +489,7 @@ class Catalog:
             identityEnabled:bool=False,
             upsert:bool=False,
             tags:dict=None,
+            unifiedTags:list[str]=None,
             systemLabels:list[str]=None,
             **kwargs)-> dict:
         """
@@ -503,6 +504,7 @@ class Catalog:
         upsert : OPTIONAL : If the dataset to be created with profile enbaled and Upsert capability.
         tags : OPTIONAL : set of attribute to add as tags.
         systemLabels : OPTIONAL : A list of string to attribute system based label on creation.
+        unifiedTags : OPTIONAL : a list of tags to be added to the dataset
         possible kwargs
         requestDataSource : Set to true if you want Catalog to create a dataSource on your behalf; otherwise, pass a dataSourceId in the body.
         """
@@ -533,6 +535,8 @@ class Catalog:
             data['tags']["unifiedIdentity"] = ["enabled:true"]
         if upsert:
             data['tags']['unifiedProfile'] = ["enabled: true","isUpsert: true"]
+        if unifiedTags is not None and type(unifiedTags) == list:
+            data["unifiedTags"] = unifiedTags
         if tags is not None and type(tags) == dict:
             for key in tags:
                 data['tags'][key] = tags[key]
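The new parameter simply passes a list of tag ids through to the request body under `data["unifiedTags"]`. A hedged usage sketch; the configuration setup and all ids below are placeholders, not values taken from this diff:

```python
import aepp
from aepp import catalog

# Hypothetical setup: importConfigFile with connectInstance=True returns a
# ConnectObject (aepp's standard configuration flow).
config = aepp.importConfigFile("myconfig.json", connectInstance=True, sandbox="dev")

cat = catalog.Catalog(config=config)
res = cat.createDataSet(
    name="web_events",                                      # placeholder name
    schemaId="https://ns.adobe.com/tenant/schemas/abc123",  # placeholder schema id
    unifiedTags=["tag-123", "tag-456"],                     # tag ids attached at creation
)
```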
aepp/cli/__main__.py CHANGED
@@ -1,4 +1,3 @@
-from ast import arg
 from matplotlib.pyplot import table
 import aepp
 from aepp import synchronizer, schema, schemamanager, fieldgroupmanager, datatypemanager, identity, queryservice,catalog,flowservice,sandboxes, segmentation
@@ -126,15 +125,20 @@ class ServiceShell(cmd.Cmd):
         """Change the current sandbox after configuration"""
         parser = argparse.ArgumentParser(prog='change sandbox', add_help=True)
         parser.add_argument("sandbox", help="sandbox name to switch to")
-        args = parser.parse_args(shlex.split(args))
-        self.sandbox = str(args.sandbox) if args.sandbox else console.print(Panel("(!) Please provide a sandbox name using -sx or --sandbox", style="red"))
-        if self.config is not None:
-            if args.sandbox:
-                self.config.setSandbox(str(args.sandbox))
-                self.prompt = f"{self.config.sandbox}> "
-                console.print(Panel(f"Sandbox changed to: [bold green]{self.config.sandbox}[/bold green]", style="blue"))
-        else:
-            console.print(Panel("(!) You must configure the connection first using the 'config' command.", style="red"))
+        try:
+            args = parser.parse_args(shlex.split(args))
+            self.sandbox = str(args.sandbox) if args.sandbox else console.print(Panel("(!) Please provide a sandbox name using -sx or --sandbox", style="red"))
+            if self.config is not None:
+                if args.sandbox:
+                    self.config.setSandbox(str(args.sandbox))
+                    self.prompt = f"{self.config.sandbox}> "
+                    console.print(Panel(f"Sandbox changed to: [bold green]{self.config.sandbox}[/bold green]", style="blue"))
+            else:
+                console.print(Panel("(!) You must configure the connection first using the 'config' command.", style="red"))
+        except Exception as e:
+            console.print(f"(!) Error: {str(e)}", style="red")
+        except SystemExit:
+            return
 
     @login_required
     def do_get_sandboxes(self, args:Any) -> None:
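A detail worth noting in this new pattern, which the release applies to several commands below as well: argparse reports bad input by raising `SystemExit`, which derives from `BaseException` rather than `Exception`, so the `except Exception` clause listed first never intercepts it and the dedicated `except SystemExit: return` still runs, keeping the interactive shell alive. A minimal standalone sketch of why the ordering works:

```python
import argparse, shlex

parser = argparse.ArgumentParser(prog="change sandbox", add_help=True)
parser.add_argument("sandbox", help="sandbox name to switch to")
try:
    args = parser.parse_args(shlex.split(""))  # missing argument -> SystemExit
except Exception as e:
    print(f"(!) Error: {e}")    # not reached: SystemExit is not an Exception
except SystemExit:
    print("argparse exited")    # reached instead of terminating the shell
```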
@@ -890,8 +894,9 @@ class ServiceShell(cmd.Cmd):
             return
 
     @login_required
-    def do_get_datasets_tableName(self, args:Any) -> None:
-        parser = argparse.ArgumentParser(prog='get_datasets', add_help=True)
+    def do_get_datasets_tableNames(self, args:Any) -> None:
+        """List all datasets with their table names in the current sandbox"""
+        parser = argparse.ArgumentParser(prog='get_datasets_tableNames', add_help=True)
         try:
             args = parser.parse_args(shlex.split(args))
             aepp_cat = catalog.Catalog(config=self.config)
@@ -1063,10 +1068,11 @@ class ServiceShell(cmd.Cmd):
     def do_get_identities(self, args:Any) -> None:
         """List all identities in the current sandbox"""
         parser = argparse.ArgumentParser(prog='get_identities', add_help=True)
-        parser.add_argument("-r","--region", help="Region to get identities from: 'ndl2' (default), 'va7', 'aus5', 'can2', 'ind2'", default='ndl2')
+        parser.add_argument("-r","--region", help="Region to get identities from: 'ndl2' (default), 'va7', 'aus5', 'can2', 'ind2'", default='ndl2',type=str)
         parser.add_argument("-co","--custom_only",help="Get only custom identities", default=False,type=bool)
         try:
             args = parser.parse_args(shlex.split(args))
+            region = args.region if args.region else 'ndl2'
             aepp_identity = identity.Identity(config=self.config,region=args.region)
             identities = aepp_identity.getIdentities(only_custom=args.custom_only)
             df_identites = pd.DataFrame(identities)
@@ -1123,7 +1129,7 @@ class ServiceShell(cmd.Cmd):
             table.add_column("Name", style="magenta")
             table.add_column("Evaluation", style="yellow")
             table.add_column("Total Profiles", style="green")
-            table.add_column("Shared", style="white")
+            table.add_column("In Flow", style="white")
             for aud in audiences:
                 table.add_row(
                     aud.get("id","N/A"),
@@ -1138,6 +1144,36 @@ class ServiceShell(cmd.Cmd):
         except SystemExit:
             return
 
+    @login_required
+    def do_get_tags(self,args)->None:
+        """
+        Provide the list of tags defined in the current organization
+        """
+        parser = argparse.ArgumentParser(prog='get_tags', add_help=True)
+        try:
+            from aepp import tags
+            args = parser.parse_args(shlex.split(args))
+            aepp_tag = tags.Tags(config=self.config)
+            tags = aepp_tag.getTags()
+            df_tags = pd.DataFrame(tags)
+            df_tags.to_csv(f"tags.csv",index=False)
+            console.print(f"Tags exported to tags.csv", style="green")
+            table = Table(title=f"Tags in Organization: {self.config.org_id}")
+            table.add_column("ID", style="cyan")
+            table.add_column("Name", style="magenta")
+            table.add_column("Category Name", style="white")
+            for _, tg in df_tags.iterrows():
+                table.add_row(
+                    str(tg.get("id","N/A")),
+                    tg.get("name","N/A"),
+                    tg.get("tagCategoryName","N/A"),
+                )
+            console.print(table)
+        except Exception as e:
+            console.print(f"(!) Error: {str(e)}", style="red")
+        except SystemExit:
+            return
+
     @login_required
     def do_get_flows(self, args:Any) -> None:
         """List flows in the current sandbox based on parameters provided. By default, list all sources and destinations."""
@@ -1440,7 +1476,7 @@ class ServiceShell(cmd.Cmd):
             conn = aepp_query.connection()
             iqs2 = queryservice.InteractiveQuery2(conn)
             result:pd.DataFrame = iqs2.query(sql=args.sql_query)
-            result.to_csv(f"query_result_{int(datetime.now().timestamp())}.csv", index=False)
+            result.sample(5).to_csv(f"query_result_{int(datetime.now().timestamp())}.csv", index=False)
             console.print(f"Query result exported to query_result_{int(datetime.now().timestamp())}.csv", style="green")
             console.print(result)
         except Exception as e:
@@ -1452,11 +1488,11 @@ class ServiceShell(cmd.Cmd):
     @login_required
     def do_extractArtifacts(self,args:Any) -> None:
         """extractArtifacts localfolder"""
-        console.print("Extracting artifacts...", style="blue")
-        parser = argparse.ArgumentParser(prog='extractArtifacts', description='Extract artifacts from AEP')
+        parser = argparse.ArgumentParser(prog='extractArtifacts', description='Extract artifacts from AEP',add_help=True)
         parser.add_argument('-lf','--localfolder', help='Local folder to extract artifacts to', default='./extractions')
         parser.add_argument('-rg','--region', help='Region to extract artifacts from: "ndl2" (default), "va7", "aus5", "can2", "ind2"',default='ndl2')
         try:
+            console.print("Extracting artifacts...", style="blue")
             args = parser.parse_args(shlex.split(args))
             aepp.extractSandboxArtifacts(
                 sandbox=self.config,
@@ -1466,18 +1502,19 @@ class ServiceShell(cmd.Cmd):
             console.print(Panel("Extraction completed!", style="green"))
         except SystemExit:
             return
+        except Exception as e:
+            console.print(f"(!) Error: {str(e)}", style="red")
 
     @login_required
     def do_extractArtifact(self,args:Any) -> None:
         """extractArtifacts localfolder"""
-        console.print("Extracting artifact...", style="blue")
-        parser = argparse.ArgumentParser(prog='extractArtifact', description='Extract artifacts from AEP')
+        parser = argparse.ArgumentParser(prog='extractArtifact', description='Extract artifacts from AEP',add_help=True)
         parser.add_argument('artifact', help='artifact to extract (name or id): "schema","fieldgroup","datatype","descriptor","dataset","identity","mergepolicy","audience"')
         parser.add_argument('-at','--artifactType', help='artifact type ')
         parser.add_argument('-lf','--localfolder', help='Local folder to extract artifacts to',default='extractions')
         parser.add_argument('-rg','--region', help='Region to extract artifacts from: "ndl2" (default), "va7", "aus5", "can2", "ind2"',default='ndl2')
-
         try:
+            console.print("Extracting artifact...", style="blue")
             args = parser.parse_args(shlex.split(args))
             aepp.extractSandboxArtifact(
                 artifact=args.artifact,
@@ -1488,12 +1525,13 @@ class ServiceShell(cmd.Cmd):
             console.print("Extraction completed!", style="green")
         except SystemExit:
             return
+        except Exception as e:
+            console.print(f"(!) Error: {str(e)}", style="red")
 
     @login_required
     def do_sync(self,args:Any) -> None:
         """extractArtifacts localfolder"""
-        console.print("Syncing artifact...", style="blue")
-        parser = argparse.ArgumentParser(prog='extractArtifact', description='Extract artifacts from AEP')
+        parser = argparse.ArgumentParser(prog='extractArtifact', description='Extract artifacts from AEP',add_help=True)
         parser.add_argument('artifact', help='artifact to extract (name or id): "schema","fieldgroup","datatype","descriptor","dataset","identity","mergepolicy","audience"')
         parser.add_argument('-at','--artifactType', help='artifact type ',type=str)
         parser.add_argument('-t','--targets', help='target sandboxes',nargs='+',type=str)
@@ -1504,18 +1542,22 @@ class ServiceShell(cmd.Cmd):
         try:
             args = parser.parse_args(shlex.split(args))
             console.print("Initializing Synchronizor...", style="blue")
+            if args.region:
+                region=args.region
+            else:
+                region='ndl2'
             if args.baseSandbox:
                 synchronizor = synchronizer.Synchronizer(
                     config=self.config,
                     targets=args.targets,
-                    region=args.region,
+                    region=region,
                     baseSandbox=args.baseSandbox,
                 )
             elif args.localfolder:
                 synchronizor = synchronizer.Synchronizer(
                     config=self.config,
                     targets=args.targets,
-                    region=args.region,
+                    region=region,
                     localFolder=args.localfolder,
                 )
             console.print("Starting Sync...", style="blue")
aepp/fieldgroupmanager.py CHANGED
@@ -241,7 +241,7 @@ class FieldGroupManager:
             if '/datatypes/' in str(self.fieldGroup):
                 dataTypeSearch = f"(https://ns.adobe.com/{self.tenantId[1:]}/datatypes/[0-9a-z]+?)'"
                 dataTypes = re.findall(dataTypeSearch,str(self.fieldGroup.get('definitions')))
-                for file in self.datatypeFolder.glob('*.json'):
+                for file in folder.glob('*.json'):
                     tmp_def = json.load(FileIO(file))
                     if tmp_def.get('$id') in dataTypes or tmp_def.get('meta:altId') in dataTypes:
                         dt_manager = DataTypeManager(tmp_def,localFolder=self.localfolder,sandbox=self.sandbox,tenantId=self.tenantId)
aepp/identity.py CHANGED
@@ -86,8 +86,8 @@ class Identity:
         self.sandbox = self.connector.config["sandbox"]
 
         environment = config["environment"]
-        base_url = f"https://platform-{region}.adobe.io"
-
+        #base_url = f"https://platform-{region}.adobe.io"
+        base_url = f"https://platform.adobe.io"
         if environment != "prod":
             base_url = f"https://platform-{environment}-{region}.adobe.io"
 
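The net effect: production requests now always go to the region-less host, while non-production environments keep the region-qualified hostname. A condensed sketch of the resulting URL selection, with names following the diff:

```python
def base_url_for(environment: str, region: str) -> str:
    """Mirror of the Identity base-url logic after this release."""
    if environment != "prod":
        return f"https://platform-{environment}-{region}.adobe.io"
    # prod: the region parameter is no longer used for the base URL
    return "https://platform.adobe.io"
```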
aepp/schema.py CHANGED
@@ -74,8 +74,8 @@ class Schema:
 
     def __init__(
         self,
-        containerId: str = "tenant",
         config: Union[dict,ConnectObject] = aepp.config.config_object,
+        containerId: str = "tenant",
         header=aepp.config.header,
         loggingObject: dict = None,
         **kwargs,
@@ -83,12 +83,10 @@ class Schema:
         """
         Copy the token and header and initiate the object to retrieve schema elements.
         Arguments:
-            containerId : OPTIONAL : "tenant"(default) or "global"
-            loggingObject : OPTIONAL : logging object to log messages.
             config : OPTIONAL : config object in the config module.
+            containerId : OPTIONAL : "tenant"(default) or "global"
             header : OPTIONAL : header object in the config module.
-        possible kwargs:
-        x-sandbox-name : name of the sandbox you want to use (default : "prod").
+            loggingObject : OPTIONAL : logging object to log messages.
         """
         if loggingObject is not None and sorted(
             ["level", "stream", "format", "filename", "file"]
aepp/schemamanager.py CHANGED
@@ -167,8 +167,10 @@ class SchemaManager:
                 fgM = FieldGroupManager(fieldGroup=definition,schemaAPI=self.schemaAPI,localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
                 self.fieldGroupsManagers[fgM.title] = fgM
             for clas in self.classIds:
+                clsM = None
                 clsM = ClassManager(clas,schemaAPI=self.schemaAPI,localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
-                self.classManagers[clsM.title] = clsM
+                if clsM is not None:
+                    self.classManagers[clsM.title] = clsM
         elif type(schema) == str:
             if self.schemaAPI is not None:
                 self.schema = self.schemaAPI.getSchema(schema,full=False,schema_type='xed')
@@ -254,6 +256,7 @@ class SchemaManager:
                 fgM = FieldGroupManager(fieldGroup=definition,schemaAPI=self.schemaAPI,localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
                 self.fieldGroupsManagers[fgM.title] = fgM
             for clas in self.classIds:
+                clsM = None
                 if self.localfolder is not None:
                     found = False
                     for folder in self.classFolder:
@@ -269,7 +272,8 @@ class SchemaManager:
                             break
                 elif self.schemaAPI is not None:
                     clsM = ClassManager(clas,schemaAPI=self.schemaAPI,localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
-                self.classManagers[clsM.title] = clsM
+                if clsM is not None:
+                    self.classManagers[clsM.title] = clsM
         elif schema is None:
             self.STATE = "NEW"
             self.classId = schemaClass
@@ -284,8 +288,10 @@ class SchemaManager:
                 ]
             }
             for clas in self.classIds:
+                clsM = None
                 clsM = ClassManager(clas,schemaAPI=self.schemaAPI,localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
-                self.classManagers[clsM.title] = clsM
+                if clsM is not None:
+                    self.classManagers[clsM.title] = clsM
             if fieldGroups is not None and type(fieldGroups) == list:
                 if fieldGroups[0] == str:
                     for fgId in fieldGroups:
@@ -322,9 +328,11 @@ class SchemaManager:
                         self.fieldGroupsManagers[fgM.title] = fgM
                 elif fieldGroups[0] == dict:
                     for fg in fieldGroups:
+                        fgM = None
                         self.fieldGroupIds.append(fg.get('$id'))
                         fgM = FieldGroupManager(fg,schemaAPI=self.schemaAPI, localFolder=localFolder,tenantId=self.tenantId,sandbox=self.sandbox)
-                        self.fieldGroupsManagers[fgM.title] = fgM
+                        if fgM is not None:
+                            self.fieldGroupsManagers[fgM.title] = fgM
             self.fieldGroupTitles= tuple(fg.title for fg in list(self.fieldGroupsManagers.values()))
             self.fieldGroups = {fg.id:fg.title for fg in list(self.fieldGroupsManagers.values())}
             self.fieldGroupIds = tuple(fg.id for fg in list(self.fieldGroupsManagers.values()))
aepp/synchronizer.py CHANGED
@@ -20,7 +20,12 @@ from .configs import ConnectObject
 
 class Synchronizer:
     ## TO DO -> Add support for local environment
-    def __init__(self,targets:list=None,config:'ConnectObject'=None,baseSandbox:str=None,region:str='nld2',localFolder:str|list|None=None):
+    def __init__(self,
+                 targets:list|None=None,
+                 config:ConnectObject|None=None,
+                 baseSandbox:str|None=None,
+                 region:str='nld2',
+                 localFolder:str|list|None=None):
         """
         Setup the synchronizor object with the base sandbox and target sandbox.
         Arguments:
@@ -71,6 +76,19 @@ class Synchronizer:
             self.descriptorFolder = [folder / 'descriptor' for folder in self.localfolder]
             self.mergePolicyFolder = [folder / 'mergepolicy' for folder in self.localfolder]
             self.audienceFolder = [folder / 'audience' for folder in self.localfolder]
+            self.tagFolder = [folder / 'tag' for folder in self.localfolder]
+            self.dict_tag_name_id = {}
+            for folder in self.tagFolder:
+                try:
+                    if folder.exists():
+                        with open(folder / 'tags.json','r') as f:
+                            tags_file = json.load(f)
+                        for tag in tags_file:
+                            self.dict_tag_name_id[tag['name']] = tag['id']
+                    pass
+                except Exception as e:
+                    print(f"could not load tags from folder {folder} : {e}")
+                    pass
         if baseSandbox is not None:
             self.baseSandbox = baseSandbox
         else:
@@ -85,7 +103,7 @@ class Synchronizer:
         self.dict_baseComponents = {'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}}
         self.dict_targetComponents = {target:{'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}} for target in targets}
 
-    def getSyncFieldGroupManager(self,fieldgroup:str,sandbox:str=None)-> dict:
+    def getSyncFieldGroupManager(self,fieldgroup:str,sandbox:str|None=None)-> dict:
         """
         Get a field group Manager from the synchronizer.
         It searches through the component cache to see if the FieldGroupManager for the target sandbox is already instantiated.
@@ -119,7 +137,7 @@ class Synchronizer:
         else:
             raise ValueError(f"the field group '{fieldgroup}' has not been synchronized to the sandbox '{sandbox}'")
 
-    def getDatasetName(self,datasetId:str,sandbox:str=None)-> str:
+    def getDatasetName(self,datasetId:str,sandbox:str|None=None)-> str:
         """
         Get a dataset name from the synchronizer base on the ID of the dataset.
         Arguments:
@@ -139,7 +157,7 @@ class Synchronizer:
         else:
             raise ValueError(f"the dataset '{datasetId}' has not been synchronized to the sandbox '{sandbox}'")
 
-    def syncComponent(self,component:Union[str,dict],componentType:str=None,force:bool=False,verbose:bool=False)-> dict:
+    def syncComponent(self,component:Union[str,dict],componentType:str|None=None,force:bool=False,verbose:bool=False)-> dict:
         """
         Synchronize a component to the target sandbox.
         The component could be a string (name or id of the component in the base sandbox) or a dictionary with the definition of the component.
@@ -235,6 +253,8 @@ class Synchronizer:
                 for file in folder.glob('*.json'):
                     ds_file = json.load(FileIO(file))
                     if ds_file['id'] == component or ds_file['name'] == component:
+                        if ds_file.get('UnifiedTags',[]) != [] and self.dict_tag_name_id is not None:
+                            ds_file['unifiedTags'] = [self.dict_tag_name_id[tag_name] for tag_name in ds_file.get('UnifiedTags',[]) if tag_name in self.dict_tag_name_id.keys()]
                         component = ds_file
                         break
             if len(component) == 1: ## if the component is the catalog API response {'key': {dataset definition}}
@@ -263,6 +283,8 @@ class Synchronizer:
                 for file in folder.glob('*.json'):
                     au_file = json.load(FileIO(file))
                     if au_file.get('id','') == component or au_file.get('name','') == component:
+                        if au_file.get('tags',[]) != [] and self.dict_tag_name_id is not None:
+                            au_file['tags'] = [self.dict_tag_name_id[tag_name] for tag_name in au_file.get('tags',[]) if tag_name in self.dict_tag_name_id.keys()]
                         component = au_file
                         break
         elif type(component) == dict:
@@ -345,6 +367,7 @@ class Synchronizer:
             raise TypeError("the baseDataType must be a DataTypeManager object")
         self.dict_baseComponents['datatype'][baseDataType.title] = baseDataType
         name_base_datatype = baseDataType.title
+        description_base_datatype = baseDataType.description
         for target in self.dict_targetsConfig.keys():
             targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
             t_datatype = None
@@ -362,7 +385,7 @@ class Synchronizer:
                 base_paths = df_base['path'].tolist()
                 target_paths = df_target['path'].tolist()
                 diff_paths = list(set(base_paths) - set(target_paths))
-                if len(diff_paths) > 0 or force==True: ## there are differences
+                if len(diff_paths) > 0 or description_base_datatype != t_datatype.description or force==True: ## there are differences
                     base_datatypes_paths = baseDataType.getDataTypePaths()
                     df_base_limited = df_base[df_base['origin'] == 'self'].copy() ## exclude field group native fields
                     df_base_limited = df_base_limited[~df_base_limited['path'].isin(list(base_datatypes_paths.keys()))] ## exclude base of datatype rows
@@ -394,6 +417,7 @@ class Synchronizer:
                     print(f"datatype '{name_base_datatype}' does not exist in target {target}, creating it")
                 df_base = baseDataType.to_dataframe(full=True)
                 new_datatype = datatypemanager.DataTypeManager(title=name_base_datatype,config=self.dict_targetsConfig[target],sandbox=target)
+                new_datatype.setDescription(description_base_datatype)
                 base_datatypes_paths = baseDataType.getDataTypePaths()
                 df_base_limited = df_base[df_base['origin'] == 'self'].copy() ## exclude field group native fields
                 df_base_limited = df_base_limited[~df_base_limited['path'].isin(list(base_datatypes_paths.keys()))] ## exclude base of datatype rows
@@ -413,7 +437,7 @@ class Synchronizer:
                         arrayBool = True
                         path = path[:-4] ## removing the [] from the path
                     new_datatype.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
-                new_datatype.setDescription(baseDataType.description)
+                new_datatype.setDescription(description_base_datatype)
                 res = new_datatype.createDataType()
                 if '$id' in res.keys():
                     t_datatype = datatypemanager.DataTypeManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -434,6 +458,7 @@ class Synchronizer:
         self.dict_baseComponents['fieldgroup'][baseFieldGroup.title] = baseFieldGroup
         name_base_fieldgroup = baseFieldGroup.title
         base_fg_classIds = baseFieldGroup.classIds
+        base_fg_description = baseFieldGroup.description
         for target in self.dict_targetsConfig.keys():
             t_fieldgroup = None
             targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
@@ -465,7 +490,9 @@ class Synchronizer:
                 base_paths = df_base['path'].tolist()
                 target_paths = df_target['path'].tolist()
                 diff_paths = [path for path in base_paths if path not in target_paths]
-                if len(diff_paths) > 0 or force==True:
+                if len(diff_paths) > 0 or base_fg_description != t_fieldgroup.description or force==True:
+                    if verbose:
+                        print(f"updating field group '{name_base_fieldgroup}' in target {target}")
                     base_datatypes_paths = baseFieldGroup.getDataTypePaths()
                     ## handling fieldgroup native fields
                     df_base_limited = df_base[df_base['origin'] == 'fieldGroup'].copy() ## exclude datatypes
@@ -500,6 +527,8 @@ class Synchronizer:
                         t_fieldgroup.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
                     if len(t_fieldgroup.classIds) != len(fg_class_ids):
                         t_fieldgroup.updateClassSupported(fg_class_ids)
+                    if base_fg_description != t_fieldgroup.description:
+                        t_fieldgroup.setDescription(base_fg_description)
                     res = t_fieldgroup.updateFieldGroup()
                     if '$id' not in res.keys():
                         raise Exception(res)
@@ -547,7 +576,7 @@ class Synchronizer:
                         arrayBool = True
                         path = path[:-4] ## removing the [] from the path
                     new_fieldgroup.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
-                new_fieldgroup.setDescription(baseFieldGroup.description)
+                new_fieldgroup.setDescription(base_fg_description)
                 res = new_fieldgroup.createFieldGroup()
                 if '$id' in res.keys():
                     t_fieldgroup = fieldgroupmanager.FieldGroupManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -571,6 +600,7 @@ class Synchronizer:
         self.dict_baseComponents['schema'][name_base_schema] = baseSchema
         descriptors = baseSchema.getDescriptors()
         base_field_groups_names = list(baseSchema.fieldGroups.values())
+        base_schema_description = baseSchema.description
         dict_base_fg_name_id = {name:fg_id for fg_id,name in baseSchema.fieldGroups.items()}
         for target in self.dict_targetsConfig.keys():
             targetSchemaAPI = schema.Schema(config=self.dict_targetsConfig[target])
@@ -582,9 +612,13 @@ class Synchronizer:
                 t_schema = schemamanager.SchemaManager(targetSchemaAPI.data.schemas_altId[name_base_schema],config=self.dict_targetsConfig[target],sandbox=target)
                 new_fieldgroups = [fg for fg in base_field_groups_names if fg not in t_schema.fieldGroups.values()]
                 existing_fieldgroups = [fg for fg in base_field_groups_names if fg in t_schema.fieldGroups.values()]
-                if len(new_fieldgroups) > 0 or force==True: ## if new field groups
+                if len(new_fieldgroups) > 0 or base_schema_description != t_schema.description or force==True: ## if new field groups
                     if verbose:
-                        print('found new field groups to add to the schema')
+                        if force == False:
+                            print('found difference in the schema, updating it')
+                        else:
+                            print('force flag is set to True, updating the schema')
+                    ## handling field groups
                     for new_fieldgroup in new_fieldgroups:
                         if baseSchema.tenantId[1:] not in dict_base_fg_name_id[new_fieldgroup]: ## ootb field group
                             if verbose:
@@ -598,7 +632,7 @@ class Synchronizer:
                                 print(f"Creating new custom field group '{tmp_FieldGroup.title}'")
                             self.__syncFieldGroup__(tmp_FieldGroup,verbose=verbose,force=force)
                             t_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][new_fieldgroup].id)
-                    t_schema.setDescription(baseSchema.description)
+                    t_schema.setDescription(base_schema_description)
                     res = t_schema.updateSchema()
                     if '$id' not in res.keys():
                         raise Exception(res)
@@ -652,6 +686,7 @@ class Synchronizer:
                 else:
                     classId_toUse = baseClassId
                 new_schema = schemamanager.SchemaManager(title=name_base_schema,config=self.dict_targetsConfig[target],schemaClass=classId_toUse,sandbox=target)
+                new_schema.setDescription(base_schema_description)
                 for fg_name in base_field_groups_names:
                     if baseSchema.tenantId[1:] not in dict_base_fg_name_id[fg_name]: ## ootb field group
                         new_schema.addFieldGroup(dict_base_fg_name_id[fg_name])
@@ -665,7 +700,6 @@ class Synchronizer:
                         tmp_FieldGroup = baseSchema.getFieldGroupManager(fg_name)
                         self.__syncFieldGroup__(tmp_FieldGroup,force=force,verbose=verbose)
                         new_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][fg_name].id)
-                new_schema.setDescription(baseSchema.description)
                 res = new_schema.createSchema()
                 if '$id' in res.keys():
                     t_schema = schemamanager.SchemaManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -739,9 +773,10 @@ class Synchronizer:
             baseIdentities = identityConn.getIdentities()
         elif self.localfolder is not None:
             baseIdentities = []
-            for file in self.identityFolder.glob('*.json'):
-                id_file = json.load(FileIO(file))
-                baseIdentities.append(id_file)
+            for folder in self.identityFolder:
+                for file in folder.glob('*.json'):
+                    id_file = json.load(FileIO(file))
+                    baseIdentities.append(id_file)
         if baseIdentityNS not in [el['xdm:namespace'].lower() for el in target_identitiesDecs]: ## identity descriptor does not exists in target schema
             def_identity = [el for el in baseIdentities if el['code'].lower() == baseIdentityNS][0]
             self.__syncIdentity__(def_identity,verbose=verbose)
@@ -852,9 +887,10 @@ class Synchronizer:
             baseIdentities = identityConn.getIdentities()
         elif self.localfolder is not None:
             baseIdentities = []
-            for file in self.identityFolder.glob('*.json'):
-                id_file = json.load(FileIO(file))
-                baseIdentities.append(id_file)
+            for folder in self.identityFolder:
+                for file in folder.glob('*.json'):
+                    id_file = json.load(FileIO(file))
+                    baseIdentities.append(id_file)
         def_identity = [el for el in baseIdentities if el['code'] == baseIdentityNS][0]
         self.__syncIdentity__(def_identity,verbose=verbose)
         target_referenceIdentity = [desc for desc in target_descriptors if desc['@type'] == 'xdm:descriptorReferenceIdentity']
@@ -916,6 +952,7 @@ class Synchronizer:
         self.dict_baseComponents['datasets'][baseDataset['name']] = baseDataset
         base_datasetName = baseDataset['name']
         base_dataset_related_schemaId = baseDataset['schemaRef']['id']
+        base_dataset_unifiedTagIds = baseDataset.get('unifiedTags',[])
         if self.baseConfig is not None:
             baseSchemaAPI = schema.Schema(config=self.baseConfig)
             base_schemas = baseSchemaAPI.getSchemas()
@@ -929,7 +966,7 @@ class Synchronizer:
         for target in self.dict_targetsConfig.keys():
             targetCatalog = catalog.Catalog(config=self.dict_targetsConfig[target])
             t_datasets = targetCatalog.getDataSets()
-            if base_datasetName not in targetCatalog.data.ids.keys(): ## only taking care if dataset does not exist
+            if base_datasetName not in targetCatalog.data.ids.keys(): ## if dataset does not exist
                 if verbose:
                     print(f"dataset '{base_datasetName}' does not exist in target {target}, creating it")
                 targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
@@ -940,15 +977,18 @@ class Synchronizer:
                     baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                     self.__syncSchema__(baseSchemaManager,verbose=verbose)
                     targetSchemaId = self.dict_targetComponents[target]['schema'][base_dataset_related_schemaName].id
-                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId)
+                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId,unifiedTags=base_dataset_unifiedTagIds)
                     self.dict_targetComponents[target]['datasets'][base_datasetName] = res
                 else: ## schema already exists in target
                     if verbose:
                         print(f"related schema '{base_dataset_related_schemaName}' does exist in target {target}, checking it")
                     baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                     self.__syncSchema__(baseSchemaManager,verbose=verbose)
-                    targetSchemaId = targetSchema.data.schemas_id[base_dataset_related_schemaName]
-                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId)
+                    target_schema = self.dict_targetComponents[target]['schema'][base_dataset_related_schemaName]
+                    targetSchemaId = target_schema.id
+                    print(f"Target Schema ID: {targetSchemaId}")
+                    print(f"unified Tags: {base_dataset_unifiedTagIds}")
+                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId,unifiedTags=base_dataset_unifiedTagIds)
                     self.dict_targetComponents[target]['datasets'][base_datasetName] = res
             else: ## dataset already exists in target
                 if verbose:
@@ -958,6 +998,16 @@ class Synchronizer:
                 t_schemas = targetSchema.getSchemas()
                 baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                 self.__syncSchema__(baseSchemaManager,verbose=verbose)
+                if verbose:
+                    print(f"dataset '{base_datasetName}' schema synchronized, checking unified tags")
+                if len(base_dataset_unifiedTagIds) > 0:
+                    t_dataset_unifiedTagIds = t_dataset.get('unifiedTags',[])
+                    tags_toAdd = [tagId for tagId in base_dataset_unifiedTagIds if tagId not in t_dataset_unifiedTagIds]
+                    if len(tags_toAdd) > 0:
+                        if verbose:
+                            print(f"adding unified tags to dataset '{base_datasetName}' in target {target}")
+                        t_dataset['unifiedTags'] = t_dataset_unifiedTagIds + tags_toAdd
+                        res = targetCatalog.putDataset(t_dataset['id'],t_dataset)
                 self.dict_targetComponents[target]['datasets'][base_datasetName] = t_dataset
 
     def __syncMergePolicy__(self,mergePolicy:dict,verbose:bool=False)->None:
@@ -1036,7 +1086,8 @@ class Synchronizer:
                     "expression":baseAudience.get('expression',[]),
                     "ansibleDataModel":baseAudience.get('ansibleDataModel',{}),
                     "profileInstanceId":baseAudience.get('profileInstanceId',''),
-                    "evaluationInfo":baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+                    "evaluationInfo":baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}}),
+                    "tags":baseAudience.get('tags',[])
                 }
                 res = targetAudiences.createAudience(audienceDef)
                 if 'id' in res.keys():
@@ -1052,6 +1103,7 @@ class Synchronizer:
                 t_audience['expression'] = baseAudience.get('expression',[])
                 t_audience['ansibleDataModel'] = baseAudience.get('ansibleDataModel',{})
                 t_audience['evaluationInfo'] = baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+                t_audience['tags'] = baseAudience.get('tags',[])
                 res = targetAudiences.putAudience(t_audience['id'],t_audience)
                 self.dict_targetComponents[target]['audience'][audience_name] = res
 
aepp-0.5.2.post3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aepp
-Version: 0.5.2.post2
+Version: 0.5.2.post3
 Summary: Package to manage AEP API endpoint and some helper functions
 Author-email: Julien Piccini <piccini.julien@gmail.com>
 License: Apache-2.0
aepp-0.5.2.post3.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
-aepp/__init__.py,sha256=rsU4OMu3pJIgy8emJAD6lhAfqH0-raZ6GyIMJanNBdM,27912
-aepp/__version__.py,sha256=Cg8MGKgYvVWLyB1zXrrIwnb1heE7cYSFsuUcUYCJuQk,23
+aepp/__init__.py,sha256=slCyBeqU3LA_TAS59z2Ng-MnVIJ-1iKt6PrNu1QWHRk,29641
+aepp/__version__.py,sha256=p2cBUcJxYGyXCezEzS7TxbGXCMvfeDr--WndrJa0ASc,23
 aepp/accesscontrol.py,sha256=PB3FcrO4bvDjdNxjHx7p_20hp4ahBXewoOSxuTGMXC8,17423
-aepp/catalog.py,sha256=hK9m3SAP0fhgkYqu14Tcfq14qBhw54tLCOF0mH31b1M,68237
+aepp/catalog.py,sha256=4s3Uzm26gGW4a55dyDRvjhmY8laQiJ0iyzT2s2A1h6I,68484
 aepp/classmanager.py,sha256=16hx_hptg3PYwmezZCr9dLjvOkNSunih1PK3Q-iPoZY,66099
 aepp/config.py,sha256=232fcO8JaYJnS4glf8Ebnx9rCdHshZBVaVUbhoOAXkc,2543
 aepp/configs.py,sha256=5rRWJoUQDDaj3AAXWdKCZBZA_Xb7q1Hd58OkWhzwK34,16151
@@ -16,30 +16,30 @@ aepp/destination.py,sha256=_-Hrzb_LUNaRrqR4Y3EZZuTisIs0nF3KH_GZpFjryrs,24348
 aepp/destinationinstanceservice.py,sha256=zEZbKi519cOOdxWMZ3mv9ccP6yjNAlNwqrQMlzW_gO4,5378
 aepp/edge.py,sha256=F2QZApmITObXB8hRWXftHBZ82KNqVZ7iSNuovT8qnk4,16041
 aepp/exportDatasetToDataLandingZone.py,sha256=C6jg3XttFC-0mswa3ypZb6qx3MCQ8_A_3kyKspurXJA,18629
-aepp/fieldgroupmanager.py,sha256=4A7u3tx63HzcDiMqyZs5TRy-LN0Xsf7VGLdt1_exw2Q,103591
+aepp/fieldgroupmanager.py,sha256=KuU5WDDh48C_3AYwjzRBPXHoLKxHZSpzel2_W18qsYo,103578
 aepp/flowservice.py,sha256=WizgwY6TYn1kiLxQt6Y3d7XgoLAb9imXrFXtt94hhog,107612
 aepp/hygiene.py,sha256=VEspnyu9eUlcK3wLeJYclaFaOWl5G5I5MRwmVA-RnUg,15385
-aepp/identity.py,sha256=E9MCIgntScMssduqKZqehT6FqSfTjWHcq7E7wESj3Zc,20833
+aepp/identity.py,sha256=XH_prFTFWVtid-BbjD7B_4Z7vpebmIo4z5ecvAZf1II,20881
 aepp/ingestion.py,sha256=OamE7NDei2Ev5vXIDkMlzvdyBaN41nkIGmpAnUlQoZI,22372
 aepp/observability.py,sha256=bKe74nlXYB5E5syh7Lj4VqIgwUI3OjMxK383P05EdLU,9951
 aepp/policy.py,sha256=JbpvfCKJl2kE2McK2mn_ZI5HKd_6pTnrfMoUdyJesWQ,24924
 aepp/privacyservice.py,sha256=V6BkJeZG1LDBCyEQm9Gx0i68iRHG6uxSJiVnXzkHapI,8790
 aepp/queryservice.py,sha256=wB9GiaMwJszNjqkYjkfEDUhdT2IoI22jA3Kt_6ki4Hk,62373
 aepp/sandboxes.py,sha256=UwlSFkO2OOmH--6ISz8rxwDu2LcLH1MPqoH7yOEAZHc,29363
-aepp/schema.py,sha256=aLYDM5lCANNddk-NZPNxCxazg9HpELalKlFxQz55dRs,123111
-aepp/schemamanager.py,sha256=G3JhVikWkaT14F8vORDfGJGivarvU2AgKO1RB-1pzdM,54117
+aepp/schema.py,sha256=85TBLuSWZjObTsnY_xBqlQV5eQOEABiy6_XKpAOe8BA,122999
+aepp/schemamanager.py,sha256=8WhAh57Iqg2TNi74OVK7X706duUkdMipB4-EvEKVXHg,54407
 aepp/segmentation.py,sha256=oSgR2yx4nawYN5XAeHV_wefvmXEf0nb-bCguaDmp8F8,43555
 aepp/sensei.py,sha256=oYNy5BSWAEqsDkEexcQso6NfA6ntGGMnCOyHri0pJs8,7761
 aepp/som.py,sha256=XNm_Lu2wt2kpSSpldLptuER2eludFXeO9fI6i3iNCzo,34175
-aepp/synchronizer.py,sha256=3scwuimQJIBVdEqJ9fVsT1UgmFc9EkH3mpYxUwSoAOE,79363
+aepp/synchronizer.py,sha256=18SC4zCDA43ewIlwOXyp9YNyF-6n6qSv8Pxl-ccvltk,82928
 aepp/tags.py,sha256=t2qBallTcWR4IOXcDBmrPpqjbSay1z3E2bcRijzVm1s,17641
 aepp/utils.py,sha256=tG-YVXylm38-bynqfp5N_Mzyo7mhlZj-dLo7wLoO4tM,1200
 aepp/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aepp/cli/__main__.py,sha256=yQGX-aCz_fDCvzjK0g9XVPjHrfJ4ZeWeVXj9aFq928A,85462
+aepp/cli/__main__.py,sha256=ui3q6CdDBd5R43xg9olu-jRioSSh76vHde-S_iaUsEY,87351
 aepp/cli/upsfieldsanalyzer.py,sha256=GAVBfXN6U8_BfU7doZwcuox71NMwdqQsEpuNgM2Osjc,13124
-aepp-0.5.2.post2.dist-info/licenses/LICENSE,sha256=HjYTlfne3BbS5gNHzNqJ5COCiTQLUdf87QkzRyFbE4Y,10337
-aepp-0.5.2.post2.dist-info/METADATA,sha256=IbeoTe1HUPrINR-0ZAYJJan3Q8wAGH5lxGg1KuR-1sY,5344
-aepp-0.5.2.post2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-aepp-0.5.2.post2.dist-info/entry_points.txt,sha256=e7HAumUTymoUiCuVRzFlcchennUBLcjxvuiimySF98Y,48
-aepp-0.5.2.post2.dist-info/top_level.txt,sha256=dtZJI8SzhWVgZRl68PHKZX_fD6amvDiFR-lqD9FSJvE,5
-aepp-0.5.2.post2.dist-info/RECORD,,
+aepp-0.5.2.post3.dist-info/licenses/LICENSE,sha256=HjYTlfne3BbS5gNHzNqJ5COCiTQLUdf87QkzRyFbE4Y,10337
+aepp-0.5.2.post3.dist-info/METADATA,sha256=-2p5jm8PyG-0I-faMx71Tyn0AS-t8iN_DTOwLAXGAdo,5344
+aepp-0.5.2.post3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+aepp-0.5.2.post3.dist-info/entry_points.txt,sha256=e7HAumUTymoUiCuVRzFlcchennUBLcjxvuiimySF98Y,48
+aepp-0.5.2.post3.dist-info/top_level.txt,sha256=dtZJI8SzhWVgZRl68PHKZX_fD6amvDiFR-lqD9FSJvE,5
+aepp-0.5.2.post3.dist-info/RECORD,,