mcp-server-mturk 0.1.0 (py3-none-any wheel)
This diff shows the contents of this package version as published to its public registry and is provided for informational purposes only.
- mcp_server_mturk-0.1.0.dist-info/METADATA +271 -0
- mcp_server_mturk-0.1.0.dist-info/RECORD +14 -0
- mcp_server_mturk-0.1.0.dist-info/WHEEL +4 -0
- mcp_server_mturk-0.1.0.dist-info/entry_points.txt +2 -0
- mcp_server_mturk-0.1.0.dist-info/licenses/LICENSE +21 -0
- mturk_mcp/__init__.py +3 -0
- mturk_mcp/client.py +75 -0
- mturk_mcp/server.py +75 -0
- mturk_mcp/tools/__init__.py +5 -0
- mturk_mcp/tools/account.py +109 -0
- mturk_mcp/tools/assignments.py +329 -0
- mturk_mcp/tools/hits.py +384 -0
- mturk_mcp/tools/qualifications.py +341 -0
- mturk_mcp/tools/workers.py +359 -0
mturk_mcp/tools/assignments.py (new file)

@@ -0,0 +1,329 @@
"""Assignment management tools for reviewing and approving work."""

import json
from mcp.types import TextContent

from ..client import get_mturk_client, format_error


def register_tools(register):
    """Register assignment-related tools."""

    async def list_assignments(args: dict) -> list[TextContent]:
        """List all assignments for a specific HIT."""
        try:
            client = get_mturk_client()
            params = {
                "HITId": args["hit_id"],
                "MaxResults": min(args.get("max_results", 100), 100),
            }
            if args.get("status"):
                params["AssignmentStatuses"] = [args["status"]]

            response = client.list_assignments_for_hit(**params)
            assignments = response.get("Assignments", [])
            result = {
                "count": len(assignments),
                "assignments": [
                    {
                        "assignment_id": a["AssignmentId"],
                        "worker_id": a["WorkerId"],
                        "status": a["AssignmentStatus"],
                        "accept_time": str(a.get("AcceptTime")),
                        "submit_time": str(a.get("SubmitTime")),
                        "answer": a.get("Answer"),
                    }
                    for a in assignments
                ],
            }
            return [TextContent(type="text", text=json.dumps(result, indent=2))]
        except Exception as e:
            return [TextContent(type="text", text=format_error(e))]

    register(
        "list_assignments",
        """Retrieves all worker submissions (assignments) for a specific HIT.

Use this tool to see who has worked on your HIT and what they submitted. This is the main way to get worker responses. Each assignment represents one worker's completion of your task.

The response includes the worker's answer in XML format (MTurk's standard format). Parse the <Answer> field to extract form field values. For example, if your form had <input name="response">, look for <QuestionIdentifier>response</QuestionIdentifier> and its <FreeText> value.

Assignment statuses:
- 'Submitted': Worker finished, awaiting your review - YOU NEED TO APPROVE OR REJECT
- 'Approved': You approved it, worker has been paid
- 'Rejected': You rejected it, worker was not paid

Filter by status to see only assignments needing review (status='Submitted') or to audit past decisions.

Returns assignment_id (needed for approve/reject), worker_id (identifies the worker), and the actual answer data.

IMPORTANT: Workers are not paid until you approve their submission. Assignments left unreviewed will auto-approve after the HIT's auto_approval_delay (default 3 days).""",
        {
            "type": "object",
            "properties": {
                "hit_id": {
                    "type": "string",
                    "description": "The unique HIT ID to list assignments for. Get this from create_hit, list_hits, or list_reviewable_hits."
                },
                "status": {
                    "type": "string",
                    "enum": ["Submitted", "Approved", "Rejected"],
                    "description": "Optional filter by assignment status. 'Submitted' shows work awaiting review (most common use). 'Approved'/'Rejected' show past reviewed work. Omit to see all assignments regardless of status."
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of assignments to return, between 1 and 100. Defaults to 100.",
                    "default": 100,
                    "minimum": 1,
                    "maximum": 100
                },
            },
            "required": ["hit_id"],
        },
        list_assignments,
    )
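Editor's note: the description above tells the caller to pull values out of MTurk's Answer XML. As an illustration (not part of the package), a worker's answer typically arrives as a QuestionFormAnswers document, and a few lines of ElementTree turn it into a dict. The sketch below assumes the usual single-value layout with FreeText or SelectionIdentifier elements; multi-select answers would need to collect every SelectionIdentifier. The `{*}` namespace wildcard avoids hard-coding the schema URL and needs Python 3.8 or newer.

```python
# Illustrative only: flatten an MTurk Answer XML string into {question_id: value}.
import xml.etree.ElementTree as ET

def parse_answer(answer_xml: str) -> dict:
    """Map each QuestionIdentifier to its FreeText (or SelectionIdentifier) value."""
    root = ET.fromstring(answer_xml)
    answers = {}
    for ans in root.findall("{*}Answer"):        # one <Answer> element per form field
        qid = ans.find("{*}QuestionIdentifier")
        value = ans.find("{*}FreeText")
        if value is None:
            value = ans.find("{*}SelectionIdentifier")
        if qid is not None and qid.text:
            answers[qid.text] = value.text if value is not None else None
    return answers
```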

    async def get_assignment(args: dict) -> list[TextContent]:
        """Get details of a specific assignment including the worker's answer."""
        try:
            client = get_mturk_client()
            response = client.get_assignment(AssignmentId=args["assignment_id"])
            a = response["Assignment"]
            hit = response.get("HIT", {})
            result = {
                "assignment_id": a["AssignmentId"],
                "hit_id": a["HITId"],
                "worker_id": a["WorkerId"],
                "status": a["AssignmentStatus"],
                "accept_time": str(a.get("AcceptTime")),
                "submit_time": str(a.get("SubmitTime")),
                "approval_time": str(a.get("ApprovalTime")) if a.get("ApprovalTime") else None,
                "rejection_time": str(a.get("RejectionTime")) if a.get("RejectionTime") else None,
                "answer": a.get("Answer"),
                "requester_feedback": a.get("RequesterFeedback"),
                "hit_title": hit.get("Title"),
            }
            return [TextContent(type="text", text=json.dumps(result, indent=2))]
        except Exception as e:
            return [TextContent(type="text", text=format_error(e))]

    register(
        "get_assignment",
        """Retrieves complete details for a single assignment, including the full worker response.

Use this tool when you need to examine one specific submission in detail, rather than listing all assignments. Useful for reviewing a particular worker's response or checking the status of a known assignment.

Returns all assignment data including:
- The worker's complete answer (in MTurk XML format)
- Timestamps for when they accepted, submitted, and when you approved/rejected
- Any feedback you provided when approving/rejecting
- The associated HIT's title for context

The answer field contains XML with the worker's form responses. Look for <QuestionIdentifier> tags matching your form input names, with values in <FreeText> or <SelectionIdentifier> tags.

This is helpful for:
- Detailed review of a specific submission before approving/rejecting
- Checking what feedback was given on a past assignment
- Debugging issues with a particular worker's submission""",
        {
            "type": "object",
            "properties": {
                "assignment_id": {
                    "type": "string",
                    "description": "The unique assignment ID to retrieve. Get this from list_assignments. Format is alphanumeric."
                },
            },
            "required": ["assignment_id"],
        },
        get_assignment,
    )

    async def approve_assignment(args: dict) -> list[TextContent]:
        """Approve an assignment and pay the worker."""
        try:
            client = get_mturk_client()
            params = {"AssignmentId": args["assignment_id"]}
            if args.get("feedback"):
                params["RequesterFeedback"] = args["feedback"]
            if args.get("override_rejection"):
                params["OverrideRejection"] = True

            client.approve_assignment(**params)
            return [TextContent(type="text", text=json.dumps({
                "success": True,
                "assignment_id": args["assignment_id"],
                "action": "approved",
                "override_rejection": args.get("override_rejection", False),
            }, indent=2))]
        except Exception as e:
            return [TextContent(type="text", text=format_error(e))]

    register(
        "approve_assignment",
        """Approves a worker's submission and triggers payment to the worker.

Use this tool when a worker has satisfactorily completed your task. Approval is the standard action for acceptable work - it pays the worker the HIT's reward amount and positively affects their reputation.

IMPORTANT: Approval triggers an immediate, irreversible payment from your account. The worker receives the full reward amount. MTurk fees are charged to you.

Best practices:
- Approve good-faith efforts even if not perfect (rejection hurts worker reputation)
- Reserve rejection for clear spam, gibberish, or obvious bad-faith submissions
- Provide feedback to help workers improve (optional but appreciated)

The override_rejection option lets you reverse a previous rejection - useful if you rejected by mistake or the worker appealed successfully. This will pay them and fix their reputation.

Workers can see your feedback, so keep it constructive. Example: "Thank you for your detailed response!" or "Good work, approved."

Assignments auto-approve after the HIT's auto_approval_delay if you don't manually review them.""",
        {
            "type": "object",
            "properties": {
                "assignment_id": {
                    "type": "string",
                    "description": "The unique assignment ID to approve. Get this from list_assignments. Must be in 'Submitted' status (or 'Rejected' if using override_rejection)."
                },
                "feedback": {
                    "type": "string",
                    "description": "Optional message to the worker explaining the approval. Workers can see this. Keep it brief and positive. Example: 'Great work, thank you!'"
                },
                "override_rejection": {
                    "type": "boolean",
                    "description": "Set to true to reverse a previous rejection and approve instead. Use this to fix mistaken rejections. The worker will be paid and their stats corrected.",
                    "default": False
                },
            },
            "required": ["assignment_id"],
        },
        approve_assignment,
    )
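Editor's note: the rejection-reversal path described above maps onto a single MTurk API call, which this tool wraps. A sketch of the underlying boto3 request follows; the client setup and assignment ID are placeholders (the package builds its client in client.py via get_mturk_client, not shown here).

```python
# Sketch only: reversing a mistaken rejection with the raw MTurk API.
import boto3

mturk = boto3.client("mturk", region_name="us-east-1")   # MTurk is only served from us-east-1
mturk.approve_assignment(
    AssignmentId="3EXAMPLEASSIGNMENTID",                  # placeholder ID from list_assignments
    RequesterFeedback="Rejected in error - approved on review.",
    OverrideRejection=True,                               # pays the worker and clears the rejection
)
```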

    async def reject_assignment(args: dict) -> list[TextContent]:
        """Reject an assignment. The worker will not be paid."""
        try:
            client = get_mturk_client()
            client.reject_assignment(
                AssignmentId=args["assignment_id"],
                RequesterFeedback=args["feedback"],
            )
            return [TextContent(type="text", text=json.dumps({
                "success": True,
                "assignment_id": args["assignment_id"],
                "action": "rejected",
                "feedback": args["feedback"],
            }, indent=2))]
        except Exception as e:
            return [TextContent(type="text", text=format_error(e))]

    register(
        "reject_assignment",
        """Rejects a worker's submission. The worker will NOT be paid and their reputation is negatively affected.

USE WITH CAUTION: Rejection is a serious action that hurts the worker. Only reject for clear violations:
- Spam or gibberish responses
- Obvious bot/automated submissions
- Completely off-topic or zero-effort responses
- Clear evidence of cheating or bad faith

DO NOT reject for:
- Minor mistakes or imperfect work (approve and provide feedback instead)
- Responses you disagree with (subjective tasks should be approved)
- Your own unclear instructions (that's on you, approve it)

You MUST provide feedback explaining why you're rejecting. This is required by MTurk and helps:
- Workers understand what went wrong
- Protect you if the worker disputes the rejection
- MTurk review the rejection if appealed

Workers can appeal rejections to MTurk, and frequent unjustified rejections can get your requester account flagged.

If you reject by mistake, use approve_assignment with override_rejection=true to reverse it.""",
        {
            "type": "object",
            "properties": {
                "assignment_id": {
                    "type": "string",
                    "description": "The unique assignment ID to reject. Get this from list_assignments. Must be in 'Submitted' status."
                },
                "feedback": {
                    "type": "string",
                    "description": "REQUIRED explanation for the rejection. Be specific about what was wrong. Example: 'Response was empty' or 'Answer does not address the question asked'. Workers see this message."
                },
            },
            "required": ["assignment_id", "feedback"],
        },
        reject_assignment,
    )

    async def approve_all_submitted_assignments(args: dict) -> list[TextContent]:
        """Approve all submitted (pending) assignments for a HIT."""
        try:
            client = get_mturk_client()
            hit_id = args["hit_id"]

            # Get all submitted assignments
            response = client.list_assignments_for_hit(
                HITId=hit_id,
                AssignmentStatuses=["Submitted"],
                MaxResults=100,
            )
            assignments = response.get("Assignments", [])

            approved = []
            failed = []

            for a in assignments:
                try:
                    params = {"AssignmentId": a["AssignmentId"]}
                    if args.get("feedback"):
                        params["RequesterFeedback"] = args["feedback"]
                    client.approve_assignment(**params)
                    approved.append(a["AssignmentId"])
                except Exception as e:
                    failed.append({"assignment_id": a["AssignmentId"], "error": str(e)})

            return [TextContent(type="text", text=json.dumps({
                "hit_id": hit_id,
                "approved_count": len(approved),
                "failed_count": len(failed),
                "approved_assignments": approved,
                "failed_assignments": failed,
            }, indent=2))]
        except Exception as e:
            return [TextContent(type="text", text=format_error(e))]

    register(
        "approve_all_submitted_assignments",
        """Bulk approves all pending (submitted) assignments for a HIT in one operation.

Use this tool when you want to quickly approve all work on a HIT without reviewing each submission individually. This is convenient for surveys or tasks where you trust all responses, or when you just want to pay everyone and move on.

IMPORTANT: This triggers payments for ALL pending submissions on the HIT. Make sure you:
1. Actually want to approve everything (no spam or bad submissions)
2. Have sufficient account balance (use get_account_balance to check)

The tool finds all assignments with status='Submitted' for the specified HIT and approves each one. It reports:
- How many were successfully approved
- Any that failed (with error details)

Failed approvals are usually due to the assignment already being approved/rejected, or network issues. Successfully approved assignments are paid immediately.

For HITs with many assignments, this is much faster than approving one at a time. The optional feedback message is sent to all workers.

Note: Only processes up to 100 assignments. For HITs with more pending work, run this tool multiple times.""",
        {
            "type": "object",
            "properties": {
                "hit_id": {
                    "type": "string",
                    "description": "The unique HIT ID to approve all submissions for. All pending (Submitted) assignments for this HIT will be approved."
                },
                "feedback": {
                    "type": "string",
                    "description": "Optional feedback message sent to ALL workers being approved. Keep it generic since it goes to everyone. Example: 'Thank you for your participation!'"
                },
            },
            "required": ["hit_id"],
        },
        approve_all_submitted_assignments,
    )
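Editor's note: the bulk-approval tool above stops at 100 assignments per call, as its description says. If a HIT accumulates more than that, the underlying ListAssignmentsForHIT API pages with NextToken. The sketch below (not part of the package) drains every Submitted assignment; the boto3 client setup is assumed and would mirror whatever get_mturk_client in client.py does.

```python
# Sketch only: walk every Submitted assignment for a HIT, following NextToken.
import boto3

mturk = boto3.client("mturk", region_name="us-east-1")

def iter_submitted_assignments(hit_id: str):
    """Yield all Submitted assignments for hit_id, across pagination pages."""
    kwargs = {"HITId": hit_id, "AssignmentStatuses": ["Submitted"], "MaxResults": 100}
    while True:
        page = mturk.list_assignments_for_hit(**kwargs)
        yield from page.get("Assignments", [])
        token = page.get("NextToken")
        if not token:
            break
        kwargs["NextToken"] = token
```

Each yielded assignment could then be approved individually, as the module's per-assignment loop does above.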