arcade-google-sheets 3.0.0__py3-none-any.whl → 3.1.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only and reflects the changes between the published versions.
- arcade_google_sheets/converters.py +108 -0
- arcade_google_sheets/enums.py +119 -0
- arcade_google_sheets/models.py +36 -1
- arcade_google_sheets/templates.py +8 -0
- arcade_google_sheets/tools/__init__.py +17 -3
- arcade_google_sheets/tools/read.py +39 -0
- arcade_google_sheets/tools/search.py +141 -0
- arcade_google_sheets/tools/write.py +123 -11
- arcade_google_sheets/utils.py +254 -16
- {arcade_google_sheets-3.0.0.dist-info → arcade_google_sheets-3.1.1.dist-info}/METADATA +1 -1
- arcade_google_sheets-3.1.1.dist-info/RECORD +18 -0
- arcade_google_sheets-3.0.0.dist-info/RECORD +0 -15
- {arcade_google_sheets-3.0.0.dist-info → arcade_google_sheets-3.1.1.dist-info}/WHEEL +0 -0
- {arcade_google_sheets-3.0.0.dist-info → arcade_google_sheets-3.1.1.dist-info}/licenses/LICENSE +0 -0
arcade_google_sheets/converters.py
ADDED
@@ -0,0 +1,108 @@
+from arcade_google_sheets.enums import Dimension
+from arcade_google_sheets.models import CellValue, SheetDataInput, ValueRange
+from arcade_google_sheets.utils import (
+    col_to_index,
+    group_contiguous_rows,
+    index_to_col,
+)
+
+
+class SheetDataInputToValueRangesConverter:
+    def __init__(self, sheet_name: str, sheet_data: SheetDataInput):
+        self.sheet_name = sheet_name
+        self.sheet_data = sheet_data
+
+    def convert(self) -> list[ValueRange]:
+        """
+        Convert a SheetDataInput to a list of ValueRanges that are row-oriented.
+
+        Args:
+            sheet_name (str): The name of the sheet to which the data belongs.
+            sheet_data (SheetDataInput): The data to convert into ranges.
+
+        Returns:
+            list[ValueRange]: The converted ValueRanges.
+        """
+        if not self.sheet_data.data:
+            return []
+
+        row_ranges = self._build_row_oriented_ranges()
+
+        return row_ranges
+
+    def _to_float_if_int(self, value: CellValue) -> bool | str | float:
+        """
+        The spreadsheets.values.batchUpdate API does not support int values.
+        So we convert ints to floats.
+
+        Args:
+            value (Any): The value to possibly convert.
+
+        Returns:
+            bool | str | float: The converted value.
+        """
+        if isinstance(value, bool):
+            return value
+        if isinstance(value, int):
+            return float(value)
+        return value
+
+    def _get_cell_value(self, row_num: int, col_idx: int) -> bool | str | float:
+        """
+        Safely fetch a cell value.
+
+        Args:
+            row_num (int): The row number of the cell.
+            col_idx (int): The column index of the cell.
+
+        Returns:
+            bool | str | float: The value of the cell.
+        """
+        col_letter = index_to_col(col_idx)
+        return self._to_float_if_int(self.sheet_data.data[row_num][col_letter])
+
+    def _build_row_oriented_ranges(self) -> list[ValueRange]:
+        """
+        Build row-oriented ValueRanges for the object's sheet data.
+
+        Returns:
+            list[ValueRange]: The row-oriented ValueRanges.
+        """
+        # Map (start_col_idx, end_col_idx) -> { row_num: [values across columns] }
+        segment_to_rows_values: dict[tuple[int, int], dict[int, list[bool | str | float]]] = {}
+
+        for row_num in sorted(self.sheet_data.data):
+            cols_dict = self.sheet_data.data[row_num]
+            col_indices = sorted(col_to_index(col) for col in cols_dict)
+            if not col_indices:
+                continue
+            contiguous_groups = group_contiguous_rows(col_indices)
+            for group in contiguous_groups:
+                start_idx = group[0]
+                end_idx = group[-1]
+                row_values = [self._get_cell_value(row_num, ci) for ci in group]
+                key = (start_idx, end_idx)
+                if key not in segment_to_rows_values:
+                    segment_to_rows_values[key] = {}
+                segment_to_rows_values[key][row_num] = row_values
+
+        row_oriented_ranges: list[ValueRange] = []
+        for (start_idx, end_idx), rows_map in segment_to_rows_values.items():
+            sorted_rows = sorted(rows_map.keys())
+            row_groups = group_contiguous_rows(sorted_rows)
+            for rg in row_groups:
+                start_row = rg[0]
+                end_row = rg[-1]
+                start_col = index_to_col(start_idx)
+                end_col = index_to_col(end_idx)
+                a1_range = f"'{self.sheet_name}'!{start_col}{start_row}:{end_col}{end_row}"
+                values = [rows_map[r] for r in rg]
+                row_oriented_ranges.append(
+                    ValueRange(
+                        range=a1_range,
+                        majorDimension=Dimension.ROWS,
+                        values=values,
+                    )
+                )
+
+        return row_oriented_ranges

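Note: the converter added above groups contiguous columns within each row, then groups contiguous rows that share the same column segment, so sparse input collapses into a small set of rectangular A1 ranges. A minimal sketch of that behavior, assuming the 3.1.1 package is installed and that SheetDataInput accepts the JSON-string form used by the tools (the sample values are illustrative, not taken from the package's tests):

import json

from arcade_google_sheets.converters import SheetDataInputToValueRangesConverter
from arcade_google_sheets.models import SheetDataInput

# Rows 1-2 fill columns A-B contiguously; column D stands alone, so per the
# grouping logic above it should land in its own ValueRange instead of being
# merged into A1:B2.
sheet_data = SheetDataInput(
    data=json.dumps({"1": {"A": 1, "B": 2, "D": "note"}, "2": {"A": 3, "B": 4}})
)

converter = SheetDataInputToValueRangesConverter("Sheet1", sheet_data)
for value_range in converter.convert():
    # Expected per the algorithm above: 'Sheet1'!A1:B2 and 'Sheet1'!D1:D1
    print(value_range.range, value_range.values)
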
arcade_google_sheets/enums.py
CHANGED
@@ -28,3 +28,122 @@ class NumberFormatType(str, Enum):
 class SheetIdentifierType(str, Enum):
     POSITION = "position"
     ID_OR_NAME = "id_or_name"
+
+
+class Dimension(str, Enum):
+    ROWS = "ROWS"  # Operates on the rows of a sheet.
+    COLUMNS = "COLUMNS"  # Operates on the columns of a sheet.
+
+
+# ------------------------------------------------------------
+# Drive API enums
+# ------------------------------------------------------------
+
+
+class OrderBy(str, Enum):
+    """
+    Sort keys for ordering files in Google Drive.
+    Each key has both ascending and descending options.
+    """
+
+    CREATED_TIME = (
+        # When the file was created (ascending)
+        "createdTime"
+    )
+    CREATED_TIME_DESC = (
+        # When the file was created (descending)
+        "createdTime desc"
+    )
+    FOLDER = (
+        # The folder ID, sorted using alphabetical ordering (ascending)
+        "folder"
+    )
+    FOLDER_DESC = (
+        # The folder ID, sorted using alphabetical ordering (descending)
+        "folder desc"
+    )
+    MODIFIED_BY_ME_TIME = (
+        # The last time the file was modified by the user (ascending)
+        "modifiedByMeTime"
+    )
+    MODIFIED_BY_ME_TIME_DESC = (
+        # The last time the file was modified by the user (descending)
+        "modifiedByMeTime desc"
+    )
+    MODIFIED_TIME = (
+        # The last time the file was modified by anyone (ascending)
+        "modifiedTime"
+    )
+    MODIFIED_TIME_DESC = (
+        # The last time the file was modified by anyone (descending)
+        "modifiedTime desc"
+    )
+    NAME = (
+        # The name of the file, sorted using alphabetical ordering (e.g., 1, 12, 2, 22) (ascending)
+        "name"
+    )
+    NAME_DESC = (
+        # The name of the file, sorted using alphabetical ordering (e.g., 1, 12, 2, 22) (descending)
+        "name desc"
+    )
+    NAME_NATURAL = (
+        # The name of the file, sorted using natural sort ordering (e.g., 1, 2, 12, 22) (ascending)
+        "name_natural"
+    )
+    NAME_NATURAL_DESC = (
+        # The name of the file, sorted using natural sort ordering (e.g., 1, 2, 12, 22) (descending)
+        "name_natural desc"
+    )
+    QUOTA_BYTES_USED = (
+        # The number of storage quota bytes used by the file (ascending)
+        "quotaBytesUsed"
+    )
+    QUOTA_BYTES_USED_DESC = (
+        # The number of storage quota bytes used by the file (descending)
+        "quotaBytesUsed desc"
+    )
+    RECENCY = (
+        # The most recent timestamp from the file's date-time fields (ascending)
+        "recency"
+    )
+    RECENCY_DESC = (
+        # The most recent timestamp from the file's date-time fields (descending)
+        "recency desc"
+    )
+    SHARED_WITH_ME_TIME = (
+        # When the file was shared with the user, if applicable (ascending)
+        "sharedWithMeTime"
+    )
+    SHARED_WITH_ME_TIME_DESC = (
+        # When the file was shared with the user, if applicable (descending)
+        "sharedWithMeTime desc"
+    )
+    STARRED = (
+        # Whether the user has starred the file (ascending)
+        "starred"
+    )
+    STARRED_DESC = (
+        # Whether the user has starred the file (descending)
+        "starred desc"
+    )
+    VIEWED_BY_ME_TIME = (
+        # The last time the file was viewed by the user (ascending)
+        "viewedByMeTime"
+    )
+    VIEWED_BY_ME_TIME_DESC = (
+        # The last time the file was viewed by the user (descending)
+        "viewedByMeTime desc"
+    )
+
+
+class Corpora(str, Enum):
+    """
+    Bodies of items (spreadsheets) to which the query applies.
+    Prefer 'user' or 'drive' to 'allDrives' for efficiency.
+    By default, corpora is set to 'user'.
+    """
+
+    USER = "user"
+    DOMAIN = "domain"
+    DRIVE = "drive"
+    ALL_DRIVES = "allDrives"

arcade_google_sheets/models.py
CHANGED
@@ -3,7 +3,7 @@ from typing import Optional
 
 from pydantic import BaseModel, field_validator, model_validator
 
-from arcade_google_sheets.enums import CellErrorType, NumberFormatType
+from arcade_google_sheets.enums import CellErrorType, Dimension, NumberFormatType
 from arcade_google_sheets.types import CellValue
 
 
@@ -137,6 +137,41 @@ class Spreadsheet(BaseModel):
 
     properties: SpreadsheetProperties
     sheets: list[Sheet]
+    spreadsheetId: str | None = None
+    spreadsheetUrl: str | None = None
+
+
+class ValueRange(BaseModel):
+    """A range of cells in a spreadsheet
+
+    An implementation of https://developers.google.com/workspace/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange
+
+    Example 1:
+        {
+            "range": "Sheet1!A1:B2",
+            "majorDimension": "ROWS",
+            "values": [
+                ["1", "2"],
+                ["3", "4"]
+            ]
+        }
+    Example 2:
+        {
+            "range": "Sheet1!A1:A4",
+            "majorDimension": "COLUMNS",
+            "values": [
+                ["Item", "Wheel", "Door", "Engine"]
+            ]
+        }
+    """
+
+    range: str  # A1 notation
+    majorDimension: Dimension
+    # values is a 2D array. The outer array represents all the data and each inner
+    # array represents a major dimension. Each item in the inner array corresponds
+    # with one cell.
+    # Note: Google API docs don't mention support for int, so CellValue is not used
+    values: list[list[bool | str | float]]
 
 
 class SheetDataInput(BaseModel):

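For reference, the new ValueRange model mirrors the Sheets API resource it links to; a minimal construction sketch, assuming the package (pydantic v2 models) is installed and using illustrative values:

from arcade_google_sheets.enums import Dimension
from arcade_google_sheets.models import ValueRange

# Row-oriented range covering A1:B2; ints are passed as floats because the
# values field is typed bool | str | float, matching the comment in the model.
vr = ValueRange(
    range="'Sheet1'!A1:B2",
    majorDimension=Dimension.ROWS,
    values=[[1.0, "two"], [3.0, True]],
)
print(vr.model_dump())
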
arcade_google_sheets/templates.py
ADDED
@@ -0,0 +1,8 @@
+optional_file_picker_instructions_template = (
+    "Ensure the user knows that they have the option to select and grant access permissions to "
+    "additional files and folders via the Google Drive File Picker. "
+    "The user can pick additional files and folders via the following link: {url}"
+)
+
+spreadsheet_url_template = "https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit"
+sheet_url_template = "https://docs.google.com/spreadsheets/d/{spreadsheet_id}/edit#gid={sheet_id}"

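A quick usage sketch for the URL templates above (the IDs are placeholders, not real spreadsheets):

from arcade_google_sheets.templates import sheet_url_template, spreadsheet_url_template

# Placeholder IDs for illustration only.
print(spreadsheet_url_template.format(spreadsheet_id="SPREADSHEET_ID"))
print(sheet_url_template.format(spreadsheet_id="SPREADSHEET_ID", sheet_id=0))
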
arcade_google_sheets/tools/__init__.py
CHANGED
@@ -1,4 +1,18 @@
-from arcade_google_sheets.tools.read import get_spreadsheet
-from arcade_google_sheets.tools.
+from arcade_google_sheets.tools.read import get_spreadsheet, get_spreadsheet_metadata
+from arcade_google_sheets.tools.search import search_spreadsheets
+from arcade_google_sheets.tools.write import (
+    add_note_to_cell,
+    create_spreadsheet,
+    update_cells,
+    write_to_cell,
+)
 
-__all__ = [
+__all__ = [
+    "create_spreadsheet",
+    "get_spreadsheet",
+    "get_spreadsheet_metadata",
+    "search_spreadsheets",
+    "update_cells",
+    "add_note_to_cell",
+    "write_to_cell",
+]

arcade_google_sheets/tools/read.py
CHANGED
@@ -4,8 +4,10 @@ from arcade_tdk import ToolContext, ToolMetadataKey, tool
 from arcade_tdk.auth import Google
 
 from arcade_google_sheets.decorators import with_filepicker_fallback
+from arcade_google_sheets.templates import sheet_url_template
 from arcade_google_sheets.utils import (
     build_sheets_service,
+    get_spreadsheet_metadata_helper,
     get_spreadsheet_with_pagination,
     process_get_spreadsheet_params,
     raise_for_large_payload,
@@ -82,3 +84,40 @@ async def get_spreadsheet(
 
     raise_for_large_payload(data)
     return data
+
+
+@tool(
+    requires_auth=Google(
+        scopes=["https://www.googleapis.com/auth/drive.file"],
+    ),
+    requires_metadata=[ToolMetadataKey.CLIENT_ID, ToolMetadataKey.COORDINATOR_URL],
+)
+@with_filepicker_fallback
+async def get_spreadsheet_metadata(
+    context: ToolContext,
+    spreadsheet_id: Annotated[str, "The id of the spreadsheet to get metadata for"],
+) -> Annotated[dict, "The spreadsheet metadata for the specified spreadsheet"]:
+    """Gets the metadata for a spreadsheet including the metadata for the sheets in the spreadsheet.
+
+    Use this tool to get the name, position, ID, and URL of all sheets in a spreadsheet as well as
+    the number of rows and columns in each sheet.
+
+    Does not return the content/data of the sheets in the spreadsheet - only the metadata.
+    Excludes spreadsheets that are in the trash.
+    """
+    service = build_sheets_service(context.get_auth_token_or_empty())
+
+    metadata = get_spreadsheet_metadata_helper(service, spreadsheet_id)
+    metadata_dict = metadata.model_dump(exclude_none=True)
+    for sheet in metadata_dict.get("sheets", []):
+        sheet["sheet_url"] = sheet_url_template.format(
+            spreadsheet_id=spreadsheet_id,
+            sheet_id=sheet["properties"]["sheetId"],
+        )
+
+    return {
+        "spreadsheet_title": metadata_dict["properties"]["title"],
+        "spreadsheet_id": metadata_dict["spreadsheetId"],
+        "spreadsheet_url": metadata_dict["spreadsheetUrl"],
+        "sheets": metadata_dict["sheets"],
+    }

arcade_google_sheets/tools/search.py
ADDED
@@ -0,0 +1,141 @@
+from typing import Annotated, Any
+
+from arcade_tdk import ToolContext, ToolMetadataKey, tool
+from arcade_tdk.auth import Google
+
+from arcade_google_sheets.enums import OrderBy
+from arcade_google_sheets.file_picker import generate_google_file_picker_url
+from arcade_google_sheets.templates import (
+    optional_file_picker_instructions_template,
+    spreadsheet_url_template,
+)
+from arcade_google_sheets.utils import (
+    build_drive_service,
+    build_files_list_params,
+    remove_none_values,
+)
+
+
+@tool(
+    requires_auth=Google(
+        scopes=["https://www.googleapis.com/auth/drive.file"],
+    ),
+    requires_metadata=[ToolMetadataKey.CLIENT_ID, ToolMetadataKey.COORDINATOR_URL],
+)
+async def search_spreadsheets(
+    context: ToolContext,
+    spreadsheet_contains: Annotated[
+        list[str] | None,
+        "Keywords or phrases that must be in the spreadsheet title. Provide a list of "
+        "keywords or phrases if needed.",
+    ] = None,
+    spreadsheet_not_contains: Annotated[
+        list[str] | None,
+        "Keywords or phrases that must NOT be in the spreadsheet title. Provide a list of "
+        "keywords or phrases if needed.",
+    ] = None,
+    search_only_in_shared_drive_id: Annotated[
+        str | None,
+        "The ID of the shared drive to restrict the search to. If provided, the search will only "
+        "return spreadsheets from this drive. Defaults to None, which searches across all drives.",
+    ] = None,
+    include_shared_drives: Annotated[
+        bool,
+        "Whether to include spreadsheets from shared drives. Defaults to False (searches only in "
+        "the user's 'My Drive').",
+    ] = False,
+    include_organization_domain_spreadsheets: Annotated[
+        bool,
+        "Whether to include spreadsheets from the organization's domain. "
+        "This is applicable to admin users who have permissions to view "
+        "organization-wide spreadsheets in a Google Workspace account. "
+        "Defaults to False.",
+    ] = False,
+    order_by: Annotated[
+        list[OrderBy] | None,
+        "Sort order. Defaults to listing the most recently modified spreadsheets first. "
+        "If spreadsheet_contains or spreadsheet_not_contains is provided, "
+        "then the order_by will be ignored.",
+    ] = None,
+    limit: Annotated[
+        int, "The maximum number of spreadsheets to list. Defaults to 10. Max is 50"
+    ] = 10,
+    pagination_token: Annotated[
+        str | None, "The pagination token to continue a previous request"
+    ] = None,
+) -> Annotated[
+    dict,
+    "A dictionary containing the title, ID, and URL for each matching spreadsheet. "
+    "Also contains a pagination token if there are more spreadsheets to list.",
+]:
+    """
+    Searches for spreadsheets in the user's Google Drive based on the titles and content and
+    returns the title, ID, and URL for each matching spreadsheet.
+
+    Does not return the content/data of the sheets in the spreadsheets - only the metadata.
+    Excludes spreadsheets that are in the trash.
+    """
+    if spreadsheet_contains or spreadsheet_not_contains:
+        # Google drive API does not support other order_by values for
+        # queries with fullText search (which is used when spreadsheet_contains
+        # or spreadsheet_not_contains is provided).
+        order_by = None
+    if order_by is None:
+        order_by = [OrderBy.MODIFIED_TIME_DESC]
+    elif isinstance(order_by, OrderBy):
+        order_by = [order_by]
+
+    limit = max(1, min(50, limit))
+    page_size = min(10, limit)
+    spreadsheets: list[dict[str, Any]] = []
+
+    drive_service = build_drive_service(context.get_auth_token_or_empty())
+
+    params = build_files_list_params(
+        mime_type="application/vnd.google-apps.spreadsheet",
+        page_size=page_size,
+        order_by=order_by,
+        pagination_token=pagination_token,
+        include_shared_drives=include_shared_drives,
+        search_only_in_shared_drive_id=search_only_in_shared_drive_id,
+        include_organization_domain_spreadsheets=include_organization_domain_spreadsheets,
+        spreadsheet_contains=spreadsheet_contains,
+        spreadsheet_not_contains=spreadsheet_not_contains,
+    )
+
+    while len(spreadsheets) < limit:
+        if pagination_token:
+            params["pageToken"] = pagination_token
+        else:
+            params.pop("pageToken", None)
+
+        results = drive_service.files().list(**params).execute()
+        batch = results.get("files", [])
+        spreadsheets.extend(batch[: limit - len(spreadsheets)])
+
+        pagination_token = results.get("nextPageToken")
+        if not pagination_token or len(batch) < page_size:
+            break
+
+    # Add the spreadsheet URL to each spreadsheet
+    for spreadsheet in spreadsheets:
+        spreadsheet["url"] = spreadsheet_url_template.format(spreadsheet_id=spreadsheet["id"])
+
+    file_picker_response = generate_google_file_picker_url(
+        context,
+    )
+
+    tool_response = {
+        "pagination_token": pagination_token,
+        "spreadsheets_count": len(spreadsheets),
+        "spreadsheets": spreadsheets,
+        "file_picker": {
+            "url": file_picker_response["url"],
+            "llm_instructions": optional_file_picker_instructions_template.format(
+                url=file_picker_response["url"]
+            ),
+        },
+    }
+    tool_response = remove_none_values(tool_response)
+
+    return tool_response

arcade_google_sheets/tools/write.py
CHANGED
@@ -2,17 +2,20 @@ from typing import Annotated
 
 from arcade_tdk import ToolContext, tool
 from arcade_tdk.auth import Google
-from arcade_tdk.errors import RetryableToolError
 
+from arcade_google_sheets.converters import SheetDataInputToValueRangesConverter
 from arcade_google_sheets.models import (
-    SheetDataInput,
     Spreadsheet,
     SpreadsheetProperties,
 )
 from arcade_google_sheets.utils import (
+    batch_update,
     build_sheets_service,
+    col_to_index,
     create_sheet,
+    get_sheet_metadata_from_identifier,
     parse_write_to_cell_response,
+    validate_sheet_data_input,
     validate_write_to_cell_params,
 )
 
@@ -40,15 +43,7 @@ def create_spreadsheet(
     """
     service = build_sheets_service(context.get_auth_token_or_empty())
 
-    try:
-        sheet_data = SheetDataInput(data=data)  # type: ignore[arg-type]
-    except Exception as e:
-        msg = "Invalid JSON or unexpected data format for parameter `data`"
-        raise RetryableToolError(
-            message=msg,
-            additional_prompt_content=f"{msg}: {e}",
-            retry_after_ms=100,
-        )
+    sheet_data = validate_sheet_data_input(data)
 
     spreadsheet = Spreadsheet(
         properties=SpreadsheetProperties(title=title),
@@ -112,3 +107,120 @@ def write_to_cell(
     )
 
     return parse_write_to_cell_response(sheet_properties)
+
+
+@tool(
+    requires_auth=Google(
+        scopes=["https://www.googleapis.com/auth/drive.file"],
+    )
+)
+def update_cells(
+    context: ToolContext,
+    spreadsheet_id: Annotated[str, "The id of the spreadsheet to write to"],
+    data: Annotated[
+        str,
+        "The data to write. A JSON string (property names enclosed in double quotes) "
+        "representing a dictionary that maps row numbers to dictionaries that map "
+        "column letters to cell values. For example, data[23]['C'] is the value for cell C23. "
+        "This is the same format accepted by create_spreadsheet. "
+        "Type hint: dict[int, dict[str, int | float | str | bool]]",
+    ],
+    sheet_position: Annotated[
+        int | None,
+        "The position/tab of the sheet in the spreadsheet to write to. "
+        "A value of 1 represents the first (leftmost/Sheet1) sheet. "
+        "Defaults to 1.",
+    ] = 1,
+    sheet_id_or_name: Annotated[
+        str | None,
+        "The id or name of the sheet to write to. If provided, takes "
+        "precedence over sheet_position.",
+    ] = None,
+) -> Annotated[dict, "The status of the operation, including updated ranges and counts"]:
+    """
+    Write values to a Google Sheet using a flexible data format.
+
+    sheet_id_or_name takes precedence over sheet_position. If a sheet is not mentioned,
+    then always assume the default sheet_position is sufficient.
+    """
+    service = build_sheets_service(context.get_auth_token_or_empty())
+
+    sheet_data = validate_sheet_data_input(data)
+    sheet_name, sheet_id, sheet_url = get_sheet_metadata_from_identifier(
+        service, spreadsheet_id, sheet_position, sheet_id_or_name
+    )
+    converter = SheetDataInputToValueRangesConverter(sheet_name, sheet_data)
+    value_ranges = converter.convert()
+
+    response = batch_update(service, spreadsheet_id, value_ranges)
+
+    return {**response, "sheet_url": sheet_url, "sheet_id": sheet_id}
+
+
+@tool(
+    requires_auth=Google(
+        scopes=["https://www.googleapis.com/auth/drive.file"],
+    )
+)
+def add_note_to_cell(
+    context: ToolContext,
+    spreadsheet_id: Annotated[str, "The id of the spreadsheet to add a comment to"],
+    column: Annotated[str, "The column string to add a note to. For example, 'A', 'F', or 'AZ'"],
+    row: Annotated[int, "The row number to add a note to"],
+    note_text: Annotated[str, "The text for the note to add"],
+    sheet_position: Annotated[
+        int | None,
+        "The position/tab of the sheet in the spreadsheet to write to. "
+        "A value of 1 represents the first (leftmost/Sheet1) sheet. "
+        "Defaults to 1.",
+    ] = 1,
+    sheet_id_or_name: Annotated[
+        str | None,
+        "The id or name of the sheet to write to. If provided, takes "
+        "precedence over sheet_position.",
+    ] = None,
+) -> Annotated[dict, "The status of the operation"]:
+    """
+    Add a note to a specific cell in a spreadsheet. A note is a small
+    piece of text attached to a cell (shown with a black triangle) that
+    appears when you hover over the cell.
+
+    sheet_id_or_name takes precedence over sheet_position. If a sheet is not mentioned,
+    then always assume the default sheet_position is sufficient.
+    """
+    service = build_sheets_service(context.get_auth_token_or_empty())
+
+    sheet_name, sheet_id, sheet_url = get_sheet_metadata_from_identifier(
+        service, spreadsheet_id, sheet_position, sheet_id_or_name
+    )
+    column_index = col_to_index(column)
+
+    service.spreadsheets().batchUpdate(
+        spreadsheetId=spreadsheet_id,
+        body={
+            "requests": [
+                {
+                    "repeatCell": {
+                        "range": {
+                            "sheetId": sheet_id,
+                            "startRowIndex": row - 1,
+                            "endRowIndex": row,
+                            "startColumnIndex": column_index,
+                            "endColumnIndex": column_index + 1,
+                        },
+                        "cell": {
+                            "note": note_text,
+                        },
+                        "fields": "note",
+                    },
+                }
+            ]
+        },
+    ).execute()
+
+    return {
+        "status": "success",
+        "sheet_url": sheet_url,
+        "sheet_id": sheet_id,
+        "sheet_name": sheet_name,
+    }

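As a usage sketch, the `data` argument accepted by update_cells (and create_spreadsheet) above is a JSON string keyed by row number and column letter, per the parameter docstring; an illustrative payload follows (the values are placeholders):

import json

# Illustrative payload only: writes "Name"/"Score" headers to A1/B1 and one data
# row to A2/B2, following the data[row]["column"] convention described above.
data = json.dumps({
    "1": {"A": "Name", "B": "Score"},
    "2": {"A": "Ada", "B": 42},
})
# This string would be passed as the `data` parameter of update_cells or create_spreadsheet.
print(data)
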
arcade_google_sheets/utils.py
CHANGED
@@ -10,7 +10,12 @@ from arcade_google_sheets.constants import (
     DEFAULT_SHEET_COLUMN_COUNT,
     DEFAULT_SHEET_ROW_COUNT,
 )
-from arcade_google_sheets.enums import
+from arcade_google_sheets.enums import (
+    Corpora,
+    NumberFormatType,
+    OrderBy,
+    SheetIdentifierType,
+)
 from arcade_google_sheets.models import (
     CellData,
     CellExtendedValue,
@@ -23,7 +28,9 @@ from arcade_google_sheets.models import (
     SheetDataInput,
     SheetProperties,
     Spreadsheet,
+    ValueRange,
 )
+from arcade_google_sheets.templates import sheet_url_template
 from arcade_google_sheets.types import CellValue
 
 logging.basicConfig(
@@ -34,6 +41,15 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
 
+def remove_none_values(params: dict) -> dict:
+    """
+    Remove None values from a dictionary.
+    :param params: The dictionary to clean
+    :return: A new dictionary with None values removed
+    """
+    return {k: v for k, v in params.items() if v is not None}
+
+
 def build_sheets_service(auth_token: str | None) -> Resource:  # type: ignore[no-any-unimported]
     """
     Build a Sheets service object.
@@ -42,6 +58,14 @@ def build_sheets_service(auth_token: str | None) -> Resource:  # type: ignore[no
     return build("sheets", "v4", credentials=Credentials(auth_token))
 
 
+def build_drive_service(auth_token: str | None) -> Resource:  # type: ignore[no-any-unimported]
+    """
+    Build a Drive service object.
+    """
+    auth_token = auth_token or ""
+    return build("drive", "v3", credentials=Credentials(auth_token))
+
+
 def col_to_index(col: str) -> int:
     """Convert a sheet's column string to a 0-indexed column index
 
@@ -461,6 +485,34 @@ def convert_api_grid_data_to_dict(grids: list[dict]) -> dict:
     return dict(sorted(result.items()))
 
 
+def validate_sheet_data_input(data: str | None) -> SheetDataInput:
+    """
+    Validate and convert data to SheetDataInput, raising RetryableToolError on validation failure.
+    `data` is a JSON string representing a dictionary that maps row numbers to dictionaries that map
+    column letters to cell values.
+
+    Args:
+        data: The data parameter to validate, a JSON string representing a dictionary that maps
+            row numbers to dictionaries that map column letters to cell values.
+            Type hint: dict[int, dict[str, int | float | str | bool]]
+
+    Returns:
+        SheetDataInput: The validated sheet data input object
+
+    Raises:
+        RetryableToolError: If the data is invalid JSON or has an unexpected format
+    """
+    try:
+        return SheetDataInput(data=data)  # type: ignore[arg-type]
+    except Exception as e:
+        msg = "Invalid JSON or unexpected data format for parameter `data`"
+        raise RetryableToolError(
+            message=msg,
+            additional_prompt_content=f"{msg}: {e}",
+            retry_after_ms=100,
+        )
+
+
 def validate_write_to_cell_params(  # type: ignore[no-any-unimported]
     service: Resource,
     spreadsheet_id: str,
@@ -590,7 +642,7 @@ def calculate_a1_sheet_range(
     return None
 
 
-def find_sheet_by_identifier(
+def get_sheet_by_identifier(
     sheets: list[Sheet], sheet_identifier: str, sheet_identifier_type: SheetIdentifierType
 ) -> Sheet | None:
     """
@@ -622,6 +674,62 @@ def find_sheet_by_identifier(
     return None
 
 
+def get_spreadsheet_metadata_helper(sheets_service: Resource, spreadsheet_id: str) -> Spreadsheet:  # type: ignore[no-any-unimported]
+    """Get the spreadsheet metadata to collect the sheet names and dimensions
+
+    Args:
+        sheets_service (Resource): The Google Sheets service.
+        spreadsheet_id (str): The ID of the spreadsheet provided to the tool.
+
+    Returns:
+        Spreadsheet: The spreadsheet with only the metadata.
+    """
+    metadata_response = (
+        sheets_service.spreadsheets()
+        .get(
+            spreadsheetId=spreadsheet_id,
+            includeGridData=False,
+            fields="spreadsheetId,spreadsheetUrl,properties/title,sheets/properties",
+        )
+        .execute()
+    )
+    return Spreadsheet.model_validate(metadata_response)
+
+
+def batch_update(service: Resource, spreadsheet_id: str, data: list[ValueRange]) -> dict:  # type: ignore[no-any-unimported]
+    """
+    Batch update a spreadsheet with a list of ValueRanges.
+
+    Args:
+        service (Resource): The Google Sheets service.
+        spreadsheet_id (str): The ID of the spreadsheet to update.
+        data (list[ValueRange]): The data to update the spreadsheet with.
+
+    Returns:
+        dict: The response from the batch update.
+    """
+    body = {
+        "valueInputOption": "USER_ENTERED",
+        "data": [value_range.model_dump() for value_range in data],
+    }
+    response = (
+        service.spreadsheets()
+        .values()
+        .batchUpdate(spreadsheetId=spreadsheet_id, body=body)
+        .execute()
+    )
+    updated_ranges = [
+        value_response["updatedRange"] for value_response in response.get("responses", [])
+    ]
+    return {
+        "spreadsheet_id": response["spreadsheetId"],
+        "total_updated_rows": response["totalUpdatedRows"],
+        "total_updated_columns": response["totalUpdatedColumns"],
+        "total_updated_cells": response["totalUpdatedCells"],
+        "updated_ranges": updated_ranges,
+    }
+
+
 def get_spreadsheet_with_pagination(  # type: ignore[no-any-unimported]
     service: Resource,
     spreadsheet_id: str,
@@ -651,26 +759,19 @@ def get_spreadsheet_with_pagination(  # type: ignore[no-any-unimported]
     """
 
     # First, only get the spreadsheet metadata to collect the sheet names and dimensions
-    metadata_response = (
-        service.spreadsheets()
-        .get(
-            spreadsheetId=spreadsheet_id,
-            includeGridData=False,
-            fields="spreadsheetId,spreadsheetUrl,properties/title,sheets/properties",
-        )
-        .execute()
-    )
-    spreadsheet = Spreadsheet.model_validate(metadata_response)
+    spreadsheet_with_only_metadata = get_spreadsheet_metadata_helper(service, spreadsheet_id)
 
-    target_sheet = find_sheet_by_identifier(
-        spreadsheet.sheets, sheet_identifier, sheet_identifier_type
+    target_sheet = get_sheet_by_identifier(
+        spreadsheet_with_only_metadata.sheets, sheet_identifier, sheet_identifier_type
     )
     if not target_sheet:
         raise ToolExecutionError(
            message=f"Sheet with identifier '{sheet_identifier}' not found",
            developer_message=(
                "Sheet(s) in the spreadsheet: "
-                + ", ".join([sheet.model_dump_json() for sheet in spreadsheet.sheets])
+                + ", ".join([
+                    sheet.model_dump_json() for sheet in spreadsheet_with_only_metadata.sheets
+                ])
            ),
        )
 
@@ -706,7 +807,7 @@ def get_spreadsheet_with_pagination(  # type: ignore[no-any-unimported]
            .execute()
        )
    else:
-        response = spreadsheet.model_dump()
+        response = spreadsheet_with_only_metadata.model_dump()
 
    return parse_get_spreadsheet_response(response)
 
@@ -765,6 +866,61 @@ def process_get_spreadsheet_params(
     )
 
 
+def get_sheet_metadata_from_identifier(  # type: ignore[no-any-unimported]
+    service: Resource,
+    spreadsheet_id: str,
+    sheet_position: int | None,
+    sheet_id_or_name: str | None,
+) -> tuple[str, int, str]:
+    """Get the actual sheet name from position, id, or name identifier.
+
+    Args:
+        service (Resource): The Google Sheets service.
+        spreadsheet_id (str): The ID of the spreadsheet.
+        sheet_position (int | None): The position/tab of the sheet (1-indexed).
+        sheet_id_or_name (str | None): The id or name of the sheet.
+
+    Returns:
+        tuple[str, str, str]: The sheet's title, id, and url.
+
+    Raises:
+        ToolExecutionError: If the sheet is not found.
+    """
+    # Determine the sheet identifier and type
+    if sheet_id_or_name:
+        sheet_identifier = sheet_id_or_name
+        sheet_identifier_type = SheetIdentifierType.ID_OR_NAME
+    elif sheet_position:
+        sheet_identifier = str(sheet_position)
+        sheet_identifier_type = SheetIdentifierType.POSITION
+    else:
+        # Default to first sheet
+        sheet_identifier = "1"
+        sheet_identifier_type = SheetIdentifierType.POSITION
+
+    spreadsheet = get_spreadsheet_metadata_helper(service, spreadsheet_id)
+
+    target_sheet = get_sheet_by_identifier(
+        spreadsheet.sheets, sheet_identifier, sheet_identifier_type
+    )
+
+    if not target_sheet:
+        raise ToolExecutionError(
+            message=f"Sheet with {sheet_identifier_type.value} '{sheet_identifier}' not found",
+            developer_message=(
+                "Sheet(s) in the spreadsheet: "
+                + ", ".join([sheet.properties.title for sheet in spreadsheet.sheets])
+            ),
+        )
+
+    sheet_url = sheet_url_template.format(
+        spreadsheet_id=spreadsheet_id,
+        sheet_id=target_sheet.properties.sheetId,
+    )
+
+    return target_sheet.properties.title, target_sheet.properties.sheetId, sheet_url
+
+
 def raise_for_large_payload(data: dict) -> None:
     """Enforce a 10MB limit on the data size.
 
@@ -783,3 +939,85 @@ def raise_for_large_payload(data: dict) -> None:
         "Please reduce the number of rows and columns you are requesting and try again.",
         developer_message=f"Data size: {num_bytes / 1024 / 1024:.4f}MB",
     )
+
+
+# ------------------------------
+# Search Utils
+# ------------------------------
+def build_files_list_query(
+    mime_type: str,
+    document_contains: list[str] | None = None,
+    document_not_contains: list[str] | None = None,
+) -> str:
+    query = [f"(mimeType = '{mime_type}' and trashed = false)"]
+
+    if isinstance(document_contains, str):
+        document_contains = [document_contains]
+
+    if isinstance(document_not_contains, str):
+        document_not_contains = [document_not_contains]
+
+    if document_contains:
+        for keyword in document_contains:
+            name_contains = keyword.replace("'", "\\'")
+            full_text_contains = keyword.replace("'", "\\'")
+            keyword_query = (
+                f"(name contains '{name_contains}' or fullText contains '{full_text_contains}')"
+            )
+            query.append(keyword_query)
+
+    if document_not_contains:
+        for keyword in document_not_contains:
+            name_not_contains = keyword.replace("'", "\\'")
+            full_text_not_contains = keyword.replace("'", "\\'")
+            keyword_query = (
+                f"(not (name contains '{name_not_contains}' or "
+                f"fullText contains '{full_text_not_contains}'))"
+            )
+            query.append(keyword_query)
+
+    return " and ".join(query)
+
+
+def build_files_list_params(
+    mime_type: str,
+    page_size: int,
+    order_by: list[OrderBy] | None,
+    pagination_token: str | None,
+    include_shared_drives: bool,
+    search_only_in_shared_drive_id: str | None,
+    include_organization_domain_spreadsheets: bool,
+    spreadsheet_contains: list[str] | None = None,
+    spreadsheet_not_contains: list[str] | None = None,
+) -> dict[str, Any]:
+    query = build_files_list_query(
+        mime_type=mime_type,
+        document_contains=spreadsheet_contains,
+        document_not_contains=spreadsheet_not_contains,
+    )
+
+    params = {
+        "q": query,
+        "pageSize": page_size,
+        "orderBy": ",".join([item.value for item in order_by]) if order_by else None,
+        "pageToken": pagination_token,
+    }
+
+    if (
+        include_shared_drives
+        or search_only_in_shared_drive_id
+        or include_organization_domain_spreadsheets
+    ):
+        params["includeItemsFromAllDrives"] = "true"
+        params["supportsAllDrives"] = "true"
+
+    if search_only_in_shared_drive_id:
+        params["driveId"] = search_only_in_shared_drive_id
+        params["corpora"] = Corpora.DRIVE.value
+
+    if include_organization_domain_spreadsheets:
+        params["corpora"] = Corpora.DOMAIN.value
+
+    params = remove_none_values(params)
+
+    return params

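To make the Drive query construction above concrete, a small sketch of what build_files_list_query returns for a title/full-text search (assuming the 3.1.1 package is installed; keywords are illustrative):

from arcade_google_sheets.utils import build_files_list_query

query = build_files_list_query(
    mime_type="application/vnd.google-apps.spreadsheet",
    document_contains=["budget"],
    document_not_contains=["draft"],
)
print(query)
# Expected, per the logic above (single line, wrapped here for readability):
# (mimeType = 'application/vnd.google-apps.spreadsheet' and trashed = false) and
# (name contains 'budget' or fullText contains 'budget') and
# (not (name contains 'draft' or fullText contains 'draft'))
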
arcade_google_sheets-3.1.1.dist-info/RECORD
ADDED
@@ -0,0 +1,18 @@
+arcade_google_sheets/__init__.py,sha256=FB9h_cws_gu3UJp32GWlqvBQyAOb77JfAJNSSBqz-Jk,177
+arcade_google_sheets/constants.py,sha256=4tQOrQ1YagSklJSSw5Eq21XCcFCJdO7lso5SqWIdrPI,63
+arcade_google_sheets/converters.py,sha256=1muE_28Jk0eyFUzoVo_lN0_qy3z-uv7f5XoZLH9mitQ,3924
+arcade_google_sheets/decorators.py,sha256=QMqfvSXaFBoxYJrz69EGeMdAxF0V7JPReVXfp73Nf3Y,753
+arcade_google_sheets/enums.py,sha256=30OMPu2l_aUjz8kQzC1mR5w0lLbs0fZn8T6PDtoXpL4,4608
+arcade_google_sheets/file_picker.py,sha256=kGfUVfH5QVlIW1sL-_gAwPokt7TwVEcPk3Vnk53GKUE,2005
+arcade_google_sheets/models.py,sha256=xwPdwUis0OHTDbSLy85qWl7zIh2lDTi5HZMNKwdZjzo,8627
+arcade_google_sheets/templates.py,sha256=p3ty6Kwo7l73EEsZTaRfdG4jzQ9XvlqqeXMLKB1ydEw,489
+arcade_google_sheets/types.py,sha256=R-rCRcyFqDZx3jgl_kWeCliqC8fHuZ8ub_LQ2KoU2AE,37
+arcade_google_sheets/utils.py,sha256=VmDZOzAOEtfSPOra-ieVl_U16RLonQUOnZ4RW4Gf-oA,34895
+arcade_google_sheets/tools/__init__.py,sha256=IiSd-0Q4_uxeKI8nVMT3JVRoF5G5GB8e5Z9SkGpSnt8,472
+arcade_google_sheets/tools/read.py,sha256=g7uGhyhFNlDPHKT3xhg7NdMaSYuvzzefSzrb1XqiI80,4309
+arcade_google_sheets/tools/search.py,sha256=EnO739RmBryDerYnzV7eD_drchNNVqS50DgdySYh9Ac,5381
+arcade_google_sheets/tools/write.py,sha256=4kNx941PQt6VUGTogbepnbfUdcsVze6u5c8QvlNnWCI,7782
+arcade_google_sheets-3.1.1.dist-info/METADATA,sha256=M6BDKH6yb3xVzPC9ByD0BjJePVy0D5g5x7IxyiouuZ8,1123
+arcade_google_sheets-3.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+arcade_google_sheets-3.1.1.dist-info/licenses/LICENSE,sha256=ixeE7aL9b2B-_ZYHTY1vQcJB4NufKeo-LWwKNObGDN0,1960
+arcade_google_sheets-3.1.1.dist-info/RECORD,,

arcade_google_sheets-3.0.0.dist-info/RECORD
REMOVED
@@ -1,15 +0,0 @@
-arcade_google_sheets/__init__.py,sha256=FB9h_cws_gu3UJp32GWlqvBQyAOb77JfAJNSSBqz-Jk,177
-arcade_google_sheets/constants.py,sha256=4tQOrQ1YagSklJSSw5Eq21XCcFCJdO7lso5SqWIdrPI,63
-arcade_google_sheets/decorators.py,sha256=QMqfvSXaFBoxYJrz69EGeMdAxF0V7JPReVXfp73Nf3Y,753
-arcade_google_sheets/enums.py,sha256=xaBm-P6h63q2xrnvd8qqzS1gW5tAkUqbQcw0KL6q7Iw,1038
-arcade_google_sheets/file_picker.py,sha256=kGfUVfH5QVlIW1sL-_gAwPokt7TwVEcPk3Vnk53GKUE,2005
-arcade_google_sheets/models.py,sha256=VQy3L_Acch1MEM2RkTe-Qp_AEU-cb0JciLJ-0Ci87aw,7613
-arcade_google_sheets/types.py,sha256=R-rCRcyFqDZx3jgl_kWeCliqC8fHuZ8ub_LQ2KoU2AE,37
-arcade_google_sheets/utils.py,sha256=0SCjW92wedFilovwc-tERAtfdBYGZ1OZlil_XUZEiVc,26803
-arcade_google_sheets/tools/__init__.py,sha256=TPlitJn1VJffCXFkpOtoYXNsaEFkpujQzsYvuikCe4U,209
-arcade_google_sheets/tools/read.py,sha256=tdDrXwt1PCytQy1oBlU4HKZ7OjiTpIU43og5VkFAMqs,2704
-arcade_google_sheets/tools/write.py,sha256=gmbErdBbBKUEPxGjCWDpMJ9pMWRAvoEjApIXFQax6Z4,3598
-arcade_google_sheets-3.0.0.dist-info/METADATA,sha256=gZhKVztmJe2asXyH_cTcjcQDcACl-2r_AaglFBLBqOs,1123
-arcade_google_sheets-3.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-arcade_google_sheets-3.0.0.dist-info/licenses/LICENSE,sha256=ixeE7aL9b2B-_ZYHTY1vQcJB4NufKeo-LWwKNObGDN0,1960
-arcade_google_sheets-3.0.0.dist-info/RECORD,,

{arcade_google_sheets-3.0.0.dist-info → arcade_google_sheets-3.1.1.dist-info}/WHEEL
RENAMED
File without changes

{arcade_google_sheets-3.0.0.dist-info → arcade_google_sheets-3.1.1.dist-info}/licenses/LICENSE
RENAMED
File without changes