pidatametrics1 0.3.4__tar.gz → 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pidatametrics1
-Version: 0.3.4
+Version: 0.3.5
 Summary: A wrapper for Pi Datametrics API with CSV and BigQuery support.
 Requires-Dist: google-auth
 Requires-Dist: google-cloud-bigquery
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pidatametrics1"
-version = "0.3.4"
+version = "0.3.5"
 description = "A wrapper for Pi Datametrics API with CSV and BigQuery support."
 dependencies = [
     "requests",
@@ -5,6 +5,25 @@ import datetime
 from dateutil.relativedelta import relativedelta
 
 class PiReportManager(PiDataMetrics):
+
+    # --- HELPER: Generate Unique Tab Name ---
+    def _generate_tab_name(self, base_name, workspace_ref=None):
+        """
+        Creates a tab name like: Volume_12345_0502_1430
+        (Base_WorkspaceID_Date_Time)
+        """
+        now = datetime.datetime.now()
+        timestamp = now.strftime("%d%m_%H%M")  # e.g. 0502_1430 (5th Feb, 14:30)
+
+        ws_part = f"_{workspace_ref}" if workspace_ref else ""
+
+        # Excel caps sheet names at 31 chars (Google Sheets allows longer);
+        # the "_DDMM_HHMM" suffix takes 10, leaving 21 for base + workspace ID.
+        full_name = f"{base_name}{ws_part}_{timestamp}"
+
+        # Truncate to 31 chars to avoid API errors
+        return full_name[:31]
+
     def _resolve_workspaces(self, ids_str=None, name_pattern=None):
         all_ws = self.get_workspaces()
         targets = {}
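A quick sketch of what the new helper produces, assuming an already-constructed PiReportManager instance named mgr and a run on 5 Feb at 14:30 (all IDs illustrative):

    mgr._generate_tab_name("Vol", 12345)
    # -> "Vol_12345_0502_1430"

    mgr._generate_tab_name("SERP")
    # -> "SERP_0502_1430" (a falsy workspace_ref drops the ID segment)

    mgr._generate_tab_name("Historical_SERP_Benchmark", 12345)
    # -> "Historical_SERP_Benchmark_12345" (41 chars cut to 31, which strips
    #    the whole timestamp, so very long base names can still collide)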
@@ -43,8 +62,7 @@ class PiReportManager(PiDataMetrics):
             current_date -= relativedelta(months=1)
         return dates
 
-    # --- HELPER METHOD FOR EXPORTING ---
-    def _export_data(self, data, output_mode, filename, bq_config, spreadsheet_name, tab_name="Sheet1"):
+    def _export_data(self, data, output_mode, filename, bq_config, spreadsheet_name, tab_name):
         if not data:
             print("No data to export.")
             return
@@ -54,9 +72,9 @@ class PiReportManager(PiDataMetrics):
         elif output_mode == 'excel':
             PiExporter.to_excel(data, filename)
         elif output_mode == 'gsheet' and spreadsheet_name:
+            # tab_name is already generated with ID and Timestamp before calling this
            PiExporter.to_google_sheet(data, spreadsheet_name, tab_name)
         else:
-            # Default to CSV
             PiExporter.to_csv(data, filename)
 
     def run_volume_report(self, filename, workspace_ids=None, workspace_name=None, output_mode='csv', bq_config=None, spreadsheet_name=None):
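Since tab_name no longer defaults to "Sheet1", each gsheet export now lands on its own timestamped tab instead of overwriting the previous run. A minimal driver for the volume report, with the workspace ID and spreadsheet name purely illustrative:

    mgr.run_volume_report(
        filename="volume_backup.csv",   # used by the csv/excel paths
        workspace_ids="12345",          # illustrative ID
        output_mode="gsheet",
        spreadsheet_name="Pi Reports",  # illustrative spreadsheet
    )
    # exports to a tab named like "Vol_12345_0502_1430"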
@@ -71,7 +89,11 @@ class PiReportManager(PiDataMetrics):
                rows = PiParsers.parse_volume_data(vol_data, stg['name'], terms, ws_name)
                all_rows.extend(rows)
 
-        self._export_data(all_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name="Volume")
+        # Get the first Workspace ID found to use in the tab name
+        ws_ref = list(targets.keys())[0] if targets else "Multi"
+        unique_tab = self._generate_tab_name("Vol", ws_ref)
+
+        self._export_data(all_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name=unique_tab)
 
     def run_serp_report(self, data_sources, output_mode='csv', bq_config=None, filename=None, manual_duplication=None, spreadsheet_name=None):
         yesterday = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
@@ -83,7 +105,11 @@ class PiReportManager(PiDataMetrics):
                rows = PiParsers.parse_serp_response(raw_data, market, w_name, se_name, yesterday, cat_map, manual_duplication)
                all_rows.extend(rows)
 
-        self._export_data(all_rows, output_mode, filename or "serp_output", bq_config, spreadsheet_name, tab_name="SERP")
+        # Use the Workspace ID from the first data source
+        ws_ref = data_sources[0][1] if data_sources else "All"
+        unique_tab = self._generate_tab_name("SERP", ws_ref)
+
+        self._export_data(all_rows, output_mode, filename or "serp_output", bq_config, spreadsheet_name, tab_name=unique_tab)
 
     def run_historical_serp_report(self, data_sources, duration, frequency, start_date=None, features=None, num_results=25, output_mode='csv', bq_config=None, filename="historical_data", spreadsheet_name=None):
         if features is None:
@@ -96,12 +122,11 @@ class PiReportManager(PiDataMetrics):
 
         print(f"Starting Historical Report ({frequency}) for last {duration} periods...")
 
-        all_file_rows = [] # Used for CSV, Excel, and GSheet
+        all_file_rows = []
 
         for i, date in enumerate(target_dates):
             print(f"[{i+1}/{len(target_dates)}] Processing Date: {date}...")
-
-            daily_rows = [] # Reset container for this specific date
+            daily_rows = []
 
             for source in data_sources:
                 market, w_id, w_name, se_id, se_name = source
@@ -111,31 +136,24 @@ class PiReportManager(PiDataMetrics):
                        'serp-feature[]': features
                    }
                    raw_data = self.get_bulk_serp_data(w_id, se_id, date, **params)
-
-                    rows = PiParsers.parse_serp_response(
-                        raw_data, market, w_name, se_name, date, category_map=None
-                    )
-
+                    rows = PiParsers.parse_serp_response(raw_data, market, w_name, se_name, date, category_map=None)
                    daily_rows.extend(rows)
-
                except Exception as e:
                    print(f"Failed to fetch {w_name} on {date}: {e}")
 
-            # --- UPLOAD LOGIC: PER DATE (BigQuery Only) ---
            if output_mode == 'bigquery' and bq_config:
                if daily_rows:
                    print(f"Uploading {len(daily_rows)} rows for {date} to BigQuery...")
                    PiExporter.to_bigquery(daily_rows, bq_config['project'], bq_config['dataset'], bq_config['table'])
-                else:
-                    print(f"No data found for {date}, skipping upload.")
-
-            # --- FILE LOGIC: ACCUMULATE (CSV, Excel, GSheet) ---
            elif output_mode in ['csv', 'excel', 'gsheet']:
                all_file_rows.extend(daily_rows)
 
-        # Final Save for File-based outputs
        if output_mode in ['csv', 'excel', 'gsheet']:
-            self._export_data(all_file_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name="Historical")
+            # Use the Workspace ID from the first data source
+            ws_ref = data_sources[0][1] if data_sources else "All"
+            unique_tab = self._generate_tab_name("Hist", ws_ref)
+
+            self._export_data(all_file_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name=unique_tab)
 
    def run_llm_report(self, data_sources, start_period, end_period, stg_ids=None, output_mode='csv', bq_config=None, filename="llm_output", spreadsheet_name=None):
        all_rows = []
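The historical report splits its output handling: BigQuery rows are flushed per date inside the loop, while csv/excel/gsheet rows accumulate in all_file_rows and export once at the end. The exporter reads exactly three keys from bq_config, so a sketch of a BigQuery run (all identifiers illustrative, and the frequency string is an assumption; the date-stepping code above shows monthly decrements):

    bq_config = {
        "project": "my-gcp-project",   # illustrative GCP project
        "dataset": "pi_data",
        "table": "historical_serp",
    }
    mgr.run_historical_serp_report(
        data_sources=[("UK", 12345, "Acme UK", 1, "Google")],  # (market, w_id, w_name, se_id, se_name)
        duration=6,
        frequency="monthly",           # assumed value
        output_mode="bigquery",
        bq_config=bq_config,
    )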
@@ -146,11 +164,14 @@ class PiReportManager(PiDataMetrics):
            try:
                print(f"Fetching LLM data for {w_name} ({se_name})...")
                raw_data = self.get_llm_mentions(w_id, se_id, start_period, end_period, stg_ids)
-
                rows = PiParsers.parse_llm_response(raw_data, market, w_name, se_name)
                all_rows.extend(rows)
                print(f"Found {len(rows)} mentions/queries.")
            except Exception as e:
                print(f"Failed to fetch LLM data for {w_name}: {e}")
 
-        self._export_data(all_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name="LLM_Mentions")
+        # Use the Workspace ID from the first data source
+        ws_ref = data_sources[0][1] if data_sources else "All"
+        unique_tab = self._generate_tab_name("LLM", ws_ref)
+
+        self._export_data(all_rows, output_mode, filename, bq_config, spreadsheet_name, tab_name=unique_tab)