workspace-mcp 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that public registry.
- auth/google_auth.py +120 -12
- auth/oauth_callback_server.py +7 -3
- auth/service_decorator.py +31 -32
- core/context.py +22 -0
- core/server.py +5 -7
- core/utils.py +36 -0
- gcalendar/calendar_tools.py +308 -258
- gchat/chat_tools.py +131 -158
- gdocs/docs_tools.py +299 -149
- gdrive/drive_tools.py +168 -171
- gforms/forms_tools.py +118 -157
- gmail/gmail_tools.py +319 -400
- gsheets/sheets_tools.py +144 -197
- gslides/slides_tools.py +113 -157
- main.py +30 -24
- {workspace_mcp-1.0.1.dist-info → workspace_mcp-1.0.3.dist-info}/METADATA +61 -9
- workspace_mcp-1.0.3.dist-info/RECORD +33 -0
- workspace_mcp-1.0.1.dist-info/RECORD +0 -32
- {workspace_mcp-1.0.1.dist-info → workspace_mcp-1.0.3.dist-info}/WHEEL +0 -0
- {workspace_mcp-1.0.1.dist-info → workspace_mcp-1.0.3.dist-info}/entry_points.txt +0 -0
- {workspace_mcp-1.0.1.dist-info → workspace_mcp-1.0.3.dist-info}/licenses/LICENSE +0 -0
- {workspace_mcp-1.0.1.dist-info → workspace_mcp-1.0.3.dist-info}/top_level.txt +0 -0
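
A change that recurs throughout this release, and is easiest to see in the gdocs/docs_tools.py diff below, is the removal of per-tool `try/except HttpError` blocks in favor of a shared `@handle_http_errors("<tool name>")` decorator imported from `core.utils`. The decorator's implementation is not included in this diff; the following is a minimal sketch, assuming it simply reproduces the logging-and-reraise behavior of the removed inline handlers around the wrapped coroutine:

```python
# Hypothetical sketch only: the real handle_http_errors lives in core/utils.py
# and is not shown in this diff.
import functools
import logging

from googleapiclient.errors import HttpError

logger = logging.getLogger(__name__)


def handle_http_errors(tool_name: str):
    """Wrap an async tool so Google API errors are logged and re-raised uniformly."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            try:
                return await func(*args, **kwargs)
            except HttpError as e:
                # Mirrors the removed per-tool handlers: log with traceback, then surface.
                logger.error(f"API error in {tool_name}: {e}", exc_info=True)
                raise Exception(f"API error: {e}") from e
        return wrapper
    return decorator
```

Centralizing the handler likely accounts for much of the net line reduction across the tool modules: each tool body keeps only the API calls and output formatting, while error handling stays consistent between tools.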
gdocs/docs_tools.py (CHANGED)
```diff
@@ -14,13 +14,14 @@ from googleapiclient.http import MediaIoBaseDownload
 
 # Auth & server utilities
 from auth.service_decorator import require_google_service, require_multiple_services
-from core.utils import extract_office_xml_text
+from core.utils import extract_office_xml_text, handle_http_errors
 from core.server import server
 
 logger = logging.getLogger(__name__)
 
 @server.tool()
 @require_google_service("drive", "drive_read")
+@handle_http_errors("search_docs")
 async def search_docs(
     service,
     user_google_email: str,
```
```diff
@@ -35,36 +36,32 @@ async def search_docs(
     """
     logger.info(f"[search_docs] Email={user_google_email}, Query='{query}'")
 
-[9 removed lines not rendered in this view]
+    escaped_query = query.replace("'", "\\'")
+
+    response = await asyncio.to_thread(
+        service.files().list(
+            q=f"name contains '{escaped_query}' and mimeType='application/vnd.google-apps.document' and trashed=false",
+            pageSize=page_size,
+            fields="files(id, name, createdTime, modifiedTime, webViewLink)"
+        ).execute
+    )
+    files = response.get('files', [])
+    if not files:
+        return f"No Google Docs found matching '{query}'."
+
+    output = [f"Found {len(files)} Google Docs matching '{query}':"]
+    for f in files:
+        output.append(
+            f"- {f['name']} (ID: {f['id']}) Modified: {f.get('modifiedTime')} Link: {f.get('webViewLink')}"
         )
-
-        if not files:
-            return f"No Google Docs found matching '{query}'."
-
-        output = [f"Found {len(files)} Google Docs matching '{query}':"]
-        for f in files:
-            output.append(
-                f"- {f['name']} (ID: {f['id']}) Modified: {f.get('modifiedTime')} Link: {f.get('webViewLink')}"
-            )
-        return "\n".join(output)
-
-    except HttpError as e:
-        logger.error(f"API error in search_docs: {e}", exc_info=True)
-        raise Exception(f"API error: {e}")
+    return "\n".join(output)
 
 @server.tool()
 @require_multiple_services([
     {"service_type": "drive", "scopes": "drive_read", "param_name": "drive_service"},
     {"service_type": "docs", "scopes": "docs_read", "param_name": "docs_service"}
 ])
+@handle_http_errors("get_doc_content")
 async def get_doc_content(
     drive_service,
     docs_service,
```
```diff
@@ -81,97 +78,87 @@ async def get_doc_content(
     """
     logger.info(f"[get_doc_content] Invoked. Document/File ID: '{document_id}' for user '{user_google_email}'")
 
-[6 removed lines not rendered in this view]
+    # Step 2: Get file metadata from Drive
+    file_metadata = await asyncio.to_thread(
+        drive_service.files().get(
+            fileId=document_id, fields="id, name, mimeType, webViewLink"
+        ).execute
+    )
+    mime_type = file_metadata.get("mimeType", "")
+    file_name = file_metadata.get("name", "Unknown File")
+    web_view_link = file_metadata.get("webViewLink", "#")
+
+    logger.info(f"[get_doc_content] File '{file_name}' (ID: {document_id}) has mimeType: '{mime_type}'")
+
+    body_text = "" # Initialize body_text
+
+    # Step 3: Process based on mimeType
+    if mime_type == "application/vnd.google-apps.document":
+        logger.info(f"[get_doc_content] Processing as native Google Doc.")
+        doc_data = await asyncio.to_thread(
+            docs_service.documents().get(documentId=document_id).execute
         )
-[29 removed lines not rendered in this view]
-    else:
-        logger.info(f"[get_doc_content] Processing as Drive file (e.g., .docx, other). MimeType: {mime_type}")
-
-        export_mime_type_map = {
-            # Example: "application/vnd.google-apps.spreadsheet"z: "text/csv",
-            # Native GSuite types that are not Docs would go here if this function
-            # was intended to export them. For .docx, direct download is used.
-        }
-        effective_export_mime = export_mime_type_map.get(mime_type)
-
-        request_obj = (
-            drive_service.files().export_media(fileId=document_id, mimeType=effective_export_mime)
-            if effective_export_mime
-            else drive_service.files().get_media(fileId=document_id)
-        )
-
-        fh = io.BytesIO()
-        downloader = MediaIoBaseDownload(fh, request_obj)
-        loop = asyncio.get_event_loop()
-        done = False
-        while not done:
-            status, done = await loop.run_in_executor(None, downloader.next_chunk)
-
-        file_content_bytes = fh.getvalue()
-
-        office_text = extract_office_xml_text(file_content_bytes, mime_type)
-        if office_text:
-            body_text = office_text
-        else:
-            try:
-                body_text = file_content_bytes.decode("utf-8")
-            except UnicodeDecodeError:
-                body_text = (
-                    f"[Binary or unsupported text encoding for mimeType '{mime_type}' - "
-                    f"{len(file_content_bytes)} bytes]"
-                )
-
-        header = (
-            f'File: "{file_name}" (ID: {document_id}, Type: {mime_type})\n'
-            f'Link: {web_view_link}\n\n--- CONTENT ---\n'
+        body_elements = doc_data.get('body', {}).get('content', [])
+
+        processed_text_lines: List[str] = []
+        for element in body_elements:
+            if 'paragraph' in element:
+                paragraph = element.get('paragraph', {})
+                para_elements = paragraph.get('elements', [])
+                current_line_text = ""
+                for pe in para_elements:
+                    text_run = pe.get('textRun', {})
+                    if text_run and 'content' in text_run:
+                        current_line_text += text_run['content']
+                if current_line_text.strip():
+                    processed_text_lines.append(current_line_text)
+        body_text = "".join(processed_text_lines)
+    else:
+        logger.info(f"[get_doc_content] Processing as Drive file (e.g., .docx, other). MimeType: {mime_type}")
+
+        export_mime_type_map = {
+            # Example: "application/vnd.google-apps.spreadsheet"z: "text/csv",
+            # Native GSuite types that are not Docs would go here if this function
+            # was intended to export them. For .docx, direct download is used.
+        }
+        effective_export_mime = export_mime_type_map.get(mime_type)
+
+        request_obj = (
+            drive_service.files().export_media(fileId=document_id, mimeType=effective_export_mime)
+            if effective_export_mime
+            else drive_service.files().get_media(fileId=document_id)
         )
-        return header + body_text
 
-[9 removed lines not rendered in this view]
+        fh = io.BytesIO()
+        downloader = MediaIoBaseDownload(fh, request_obj)
+        loop = asyncio.get_event_loop()
+        done = False
+        while not done:
+            status, done = await loop.run_in_executor(None, downloader.next_chunk)
+
+        file_content_bytes = fh.getvalue()
+
+        office_text = extract_office_xml_text(file_content_bytes, mime_type)
+        if office_text:
+            body_text = office_text
+        else:
+            try:
+                body_text = file_content_bytes.decode("utf-8")
+            except UnicodeDecodeError:
+                body_text = (
+                    f"[Binary or unsupported text encoding for mimeType '{mime_type}' - "
+                    f"{len(file_content_bytes)} bytes]"
+                )
+
+    header = (
+        f'File: "{file_name}" (ID: {document_id}, Type: {mime_type})\n'
+        f'Link: {web_view_link}\n\n--- CONTENT ---\n'
+    )
+    return header + body_text
 
 @server.tool()
 @require_google_service("drive", "drive_read")
+@handle_http_errors("list_docs_in_folder")
 async def list_docs_in_folder(
     service,
     user_google_email: str,
```
```diff
@@ -186,34 +173,27 @@ async def list_docs_in_folder(
     """
    logger.info(f"[list_docs_in_folder] Invoked. Email: '{user_google_email}', Folder ID: '{folder_id}'")
 
-[14 removed lines not rendered in this view]
-        return "\n".join(out)
-
-    except HttpError as e:
-        logger.error(f"API error in list_docs_in_folder: {e}", exc_info=True)
-        raise Exception(f"API error: {e}")
-    except Exception as e:
-        logger.exception(f"Unexpected error in list_docs_in_folder: {e}")
-        raise Exception(f"Unexpected error: {e}")
+    rsp = await asyncio.to_thread(
+        service.files().list(
+            q=f"'{folder_id}' in parents and mimeType='application/vnd.google-apps.document' and trashed=false",
+            pageSize=page_size,
+            fields="files(id, name, modifiedTime, webViewLink)"
+        ).execute
+    )
+    items = rsp.get('files', [])
+    if not items:
+        return f"No Google Docs found in folder '{folder_id}'."
+    out = [f"Found {len(items)} Docs in folder '{folder_id}':"]
+    for f in items:
+        out.append(f"- {f['name']} (ID: {f['id']}) Modified: {f.get('modifiedTime')} Link: {f.get('webViewLink')}")
+    return "\n".join(out)
 
 @server.tool()
 @require_google_service("docs", "docs_write")
+@handle_http_errors("create_doc")
 async def create_doc(
     service,
-    user_google_email: str,
+    user_google_email: str,
     title: str,
     content: str = '',
 ) -> str:
```
```diff
@@ -225,20 +205,190 @@ async def create_doc(
     """
     logger.info(f"[create_doc] Invoked. Email: '{user_google_email}', Title='{title}'")
 
-[17 removed lines not rendered in this view]
+    doc = await asyncio.to_thread(service.documents().create(body={'title': title}).execute)
+    doc_id = doc.get('documentId')
+    if content:
+        requests = [{'insertText': {'location': {'index': 1}, 'text': content}}]
+        await asyncio.to_thread(service.documents().batchUpdate(documentId=doc_id, body={'requests': requests}).execute)
+    link = f"https://docs.google.com/document/d/{doc_id}/edit"
+    msg = f"Created Google Doc '{title}' (ID: {doc_id}) for {user_google_email}. Link: {link}"
+    logger.info(f"Successfully created Google Doc '{title}' (ID: {doc_id}) for {user_google_email}. Link: {link}")
+    return msg
+
+
+@server.tool()
+@require_google_service("drive", "drive_read")
+@handle_http_errors("read_doc_comments")
+async def read_doc_comments(
+    service,
+    user_google_email: str,
+    document_id: str,
+) -> str:
+    """
+    Read all comments from a Google Doc.
+
+    Args:
+        document_id: The ID of the Google Document
+
+    Returns:
+        str: A formatted list of all comments and replies in the document.
+    """
+    logger.info(f"[read_doc_comments] Reading comments for document {document_id}")
+
+    response = await asyncio.to_thread(
+        service.comments().list(
+            fileId=document_id,
+            fields="comments(id,content,author,createdTime,modifiedTime,resolved,replies(content,author,id,createdTime,modifiedTime))"
+        ).execute
+    )
+
+    comments = response.get('comments', [])
+
+    if not comments:
+        return f"No comments found in document {document_id}"
+
+    output = [f"Found {len(comments)} comments in document {document_id}:\n"]
+
+    for comment in comments:
+        author = comment.get('author', {}).get('displayName', 'Unknown')
+        content = comment.get('content', '')
+        created = comment.get('createdTime', '')
+        resolved = comment.get('resolved', False)
+        comment_id = comment.get('id', '')
+        status = " [RESOLVED]" if resolved else ""
+
+        output.append(f"Comment ID: {comment_id}")
+        output.append(f"Author: {author}")
+        output.append(f"Created: {created}{status}")
+        output.append(f"Content: {content}")
+
+        # Add replies if any
+        replies = comment.get('replies', [])
+        if replies:
+            output.append(f"  Replies ({len(replies)}):")
+            for reply in replies:
+                reply_author = reply.get('author', {}).get('displayName', 'Unknown')
+                reply_content = reply.get('content', '')
+                reply_created = reply.get('createdTime', '')
+                reply_id = reply.get('id', '')
+                output.append(f"    Reply ID: {reply_id}")
+                output.append(f"    Author: {reply_author}")
+                output.append(f"    Created: {reply_created}")
+                output.append(f"    Content: {reply_content}")
+
+        output.append("") # Empty line between comments
+
+    return "\n".join(output)
+
+
+@server.tool()
+@require_google_service("drive", "drive_file")
+@handle_http_errors("reply_to_comment")
+async def reply_to_comment(
+    service,
+    user_google_email: str,
+    document_id: str,
+    comment_id: str,
+    reply_content: str,
+) -> str:
+    """
+    Reply to a specific comment in a Google Doc.
+
+    Args:
+        document_id: The ID of the Google Document
+        comment_id: The ID of the comment to reply to
+        reply_content: The content of the reply
+
+    Returns:
+        str: Confirmation message with reply details.
+    """
+    logger.info(f"[reply_to_comment] Replying to comment {comment_id} in document {document_id}")
+
+    body = {'content': reply_content}
+
+    reply = await asyncio.to_thread(
+        service.replies().create(
+            fileId=document_id,
+            commentId=comment_id,
+            body=body,
+            fields="id,content,author,createdTime,modifiedTime"
+        ).execute
+    )
+
+    reply_id = reply.get('id', '')
+    author = reply.get('author', {}).get('displayName', 'Unknown')
+    created = reply.get('createdTime', '')
+
+    return f"Reply posted successfully!\nReply ID: {reply_id}\nAuthor: {author}\nCreated: {created}\nContent: {reply_content}"
+
+
+@server.tool()
+@require_google_service("drive", "drive_file")
+@handle_http_errors("create_doc_comment")
+async def create_doc_comment(
+    service,
+    user_google_email: str,
+    document_id: str,
+    comment_content: str,
+) -> str:
+    """
+    Create a new comment on a Google Doc.
+
+    Args:
+        document_id: The ID of the Google Document
+        comment_content: The content of the comment
+
+    Returns:
+        str: Confirmation message with comment details.
+    """
+    logger.info(f"[create_doc_comment] Creating comment in document {document_id}")
+
+    body = {"content": comment_content}
+
+    comment = await asyncio.to_thread(
+        service.comments().create(
+            fileId=document_id,
+            body=body,
+            fields="id,content,author,createdTime,modifiedTime"
+        ).execute
+    )
+
+    comment_id = comment.get('id', '')
+    author = comment.get('author', {}).get('displayName', 'Unknown')
+    created = comment.get('createdTime', '')
+
+    return f"Comment created successfully!\nComment ID: {comment_id}\nAuthor: {author}\nCreated: {created}\nContent: {comment_content}"
+
+
+@server.tool()
+@require_google_service("drive", "drive_file")
+@handle_http_errors("resolve_comment")
+async def resolve_comment(
+    service,
+    user_google_email: str,
+    document_id: str,
+    comment_id: str,
+) -> str:
+    """
+    Resolve a comment in a Google Doc.
+
+    Args:
+        document_id: The ID of the Google Document
+        comment_id: The ID of the comment to resolve
+
+    Returns:
+        str: Confirmation message.
+    """
+    logger.info(f"[resolve_comment] Resolving comment {comment_id} in document {document_id}")
+
+    body = {"resolved": True}
+
+    await asyncio.to_thread(
+        service.comments().update(
+            fileId=document_id,
+            commentId=comment_id,
+            body=body
+        ).execute
+    )
+
+    return f"Comment {comment_id} has been resolved successfully."
```