atlassian-cli 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atlassian_cli/__init__.py +3 -0
- atlassian_cli/config.py +57 -0
- atlassian_cli/conflu.py +408 -0
- atlassian_cli/http.py +35 -0
- atlassian_cli/jira.py +149 -0
- atlassian_cli/jira_assets.py +167 -0
- atlassian_cli/jira_issues.py +147 -0
- atlassian_cli/output.py +28 -0
- atlassian_cli-0.3.0.dist-info/METADATA +235 -0
- atlassian_cli-0.3.0.dist-info/RECORD +14 -0
- atlassian_cli-0.3.0.dist-info/WHEEL +5 -0
- atlassian_cli-0.3.0.dist-info/entry_points.txt +3 -0
- atlassian_cli-0.3.0.dist-info/licenses/LICENSE +21 -0
- atlassian_cli-0.3.0.dist-info/top_level.txt +1 -0
atlassian_cli/config.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""Shared Atlassian Cloud configuration and session factory for atlassian_cli package."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
import requests
|
|
7
|
+
from requests.auth import HTTPBasicAuth
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def load_env(path=None):
    """Parse a .env file into a dict of key/value strings.

    Skips blank lines and comment lines (even when the ``#`` is indented,
    which the previous implementation misparsed as a key/value pair when
    the comment contained ``=``), and strips whitespace around keys and
    values. When *path* is None, looks in the current working directory
    first, then next to this module. Returns {} when no file is found.
    """
    if path is None:
        paths = [os.path.join(os.getcwd(), '.env'),
                 os.path.join(os.path.dirname(__file__), '.env')]
    else:
        paths = [path]

    for env_path in paths:
        if os.path.exists(env_path):
            env = {}
            with open(env_path, 'r') as file:
                for raw in file:
                    line = raw.strip()
                    # Strip BEFORE the comment check so indented comments
                    # never leak into the result.
                    if not line or line.startswith('#') or '=' not in line:
                        continue
                    key, value = line.split('=', 1)
                    env[key.strip()] = value.strip()
            return env
    return {}
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def get_config():
    """Return (url, email, token) resolved from .env or the environment.

    Each value prefers the ``ATLASSIAN_*`` variable and falls back to the
    legacy ``CONFLUENCE_*`` name; .env entries take precedence over the
    process environment. Exits with status 1 when any value is missing.
    """
    env = load_env()

    def _lookup(name):
        # .env wins over os.environ; ATLASSIAN_* wins over CONFLUENCE_*.
        return (env.get(f'ATLASSIAN_{name}', os.environ.get(f'ATLASSIAN_{name}'))
                or env.get(f'CONFLUENCE_{name}', os.environ.get(f'CONFLUENCE_{name}')))

    url = _lookup('URL')
    email = _lookup('EMAIL')
    token = _lookup('TOKEN')

    if not (url and email and token):
        sys.stderr.write('ERR Missing ATLASSIAN_URL, ATLASSIAN_EMAIL, or ATLASSIAN_TOKEN\n'
                         'Set them in .env or as environment variables.\n')
        sys.exit(1)

    return url.rstrip('/'), email, token
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def get_session(email, token):
    """Build a requests.Session preconfigured for Atlassian Cloud.

    Authenticates with HTTP basic auth (email + API token) and sets JSON
    Accept/Content-Type headers for every request.
    """
    s = requests.Session()
    s.auth = HTTPBasicAuth(email, token)
    s.headers['Accept'] = 'application/json'
    s.headers['Content-Type'] = 'application/json'
    return s
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def setup():
    """Resolve configuration and return a ready (session, base_url) pair."""
    base_url, email, token = get_config()
    return get_session(email, token), base_url
|
atlassian_cli/conflu.py
ADDED
|
@@ -0,0 +1,408 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Confluence Cloud CLI — fast ADF page management via REST API v2.
|
|
3
|
+
|
|
4
|
+
Commands:
|
|
5
|
+
get Download a page (ADF + metadata)
|
|
6
|
+
put Upload local ADF to Confluence
|
|
7
|
+
diff Compare local vs remote ADF
|
|
8
|
+
sync Bulk-download all pages in a space
|
|
9
|
+
search Search local page index
|
|
10
|
+
index Rebuild page-index.json from API
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import argparse
|
|
14
|
+
import difflib
|
|
15
|
+
import json
|
|
16
|
+
import os
|
|
17
|
+
import sys
|
|
18
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
19
|
+
|
|
20
|
+
from atlassian_cli.config import setup
|
|
21
|
+
from atlassian_cli.http import APIError, api_get, api_put
|
|
22
|
+
from atlassian_cli.output import emit, emit_error, set_json_mode
|
|
23
|
+
|
|
24
|
+
V2 = '/wiki/api/v2'
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# ---------------------------------------------------------------------------
|
|
28
|
+
# Confluence v2 methods
|
|
29
|
+
# ---------------------------------------------------------------------------
|
|
30
|
+
|
|
31
|
+
_space_cache = {}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def get_page(session, base, page_id):
    """Fetch one page including its ADF body.

    The v2 API returns the ADF document as a JSON-encoded string; decode
    it in place when possible so callers receive a dict.
    """
    page = api_get(session, base, f'{V2}/pages/{page_id}',
                   **{'body-format': 'atlas_doc_format'})
    adf = page.get('body', {}).get('atlas_doc_format', {})
    raw = adf.get('value')
    if isinstance(raw, str):
        try:
            adf['value'] = json.loads(raw)
        except json.JSONDecodeError:
            # Not valid JSON — leave the raw string untouched.
            pass
    return page
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def get_space(session, base, *, key=None, space_id=None):
    """Resolve a space by key or by ID, memoizing results.

    Raises APIError(404) when a key matches no space, and ValueError when
    neither selector is supplied.
    """
    for cache_key in (key, space_id):
        if cache_key and cache_key in _space_cache:
            return _space_cache[cache_key]

    if key:
        matches = api_get(session, base, f'{V2}/spaces', keys=key).get('results', [])
        if not matches:
            raise APIError(404, f'Space not found: {key}')
        space = matches[0]
    elif space_id:
        space = api_get(session, base, f'{V2}/spaces/{space_id}')
    else:
        raise ValueError('Provide key or space_id')

    # Cache under both identifiers so either lookup path hits next time.
    _space_cache[space.get('key', '')] = space
    _space_cache[space['id']] = space
    return space
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def list_pages(session, base, space_id):
    """Return every page in a space, following cursor pagination."""
    collected = []
    next_url = f'{base}{V2}/spaces/{space_id}/pages?limit=250&sort=id'
    while next_url:
        response = session.get(next_url)
        response.raise_for_status()
        payload = response.json()
        collected.extend(payload.get('results', []))
        cursor = payload.get('_links', {}).get('next')
        if not cursor:
            break
        # The API returns a site-relative link; absolutize it when needed.
        next_url = f'{base}{cursor}' if cursor.startswith('/') else cursor
    return collected
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
# ---------------------------------------------------------------------------
|
|
88
|
+
# Local file I/O
|
|
89
|
+
# ---------------------------------------------------------------------------
|
|
90
|
+
|
|
91
|
+
def _ver(page):
|
|
92
|
+
v = page.get('version', {})
|
|
93
|
+
return v.get('number', 0) if isinstance(v, dict) else int(v or 0)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _ver_ts(page):
|
|
97
|
+
v = page.get('version', {})
|
|
98
|
+
return v.get('createdAt', '') if isinstance(v, dict) else ''
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def save_page(page_data, space_key, pages_dir):
    """Write a page's ADF body and metadata under pages_dir/<space_key>/.

    Produces ``<id>.json`` (the ADF document) and ``<id>.meta.json``
    (title, space, version, parent, timestamp). Returns both paths.
    """
    page_id = page_data['id']
    target_dir = os.path.join(pages_dir, space_key)
    os.makedirs(target_dir, exist_ok=True)

    adf_body = page_data.get('body', {}).get('atlas_doc_format', {}).get('value', {})
    adf_path = os.path.join(target_dir, f'{page_id}.json')
    with open(adf_path, 'w') as fh:
        json.dump(adf_body, fh, indent=2)

    metadata = {
        'id': page_id,
        'title': page_data.get('title', ''),
        'spaceId': page_data.get('spaceId', ''),
        'spaceKey': space_key,
        'version': _ver(page_data),
        'parentId': page_data.get('parentId', ''),
        'updatedAt': _ver_ts(page_data),
    }
    meta_path = os.path.join(target_dir, f'{page_id}.meta.json')
    with open(meta_path, 'w') as fh:
        json.dump(metadata, fh, indent=2)

    return adf_path, meta_path
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def _find_page_file(page_id, pages_dir, suffix):
|
|
128
|
+
if not os.path.isdir(pages_dir):
|
|
129
|
+
return None
|
|
130
|
+
for entry in os.listdir(pages_dir):
|
|
131
|
+
candidate = os.path.join(pages_dir, entry, f'{page_id}{suffix}')
|
|
132
|
+
if os.path.isfile(candidate):
|
|
133
|
+
return candidate
|
|
134
|
+
return None
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def load_meta(page_id, pages_dir):
    """Load a page's saved metadata dict, or None when absent."""
    meta_path = _find_page_file(page_id, pages_dir, '.meta.json')
    if meta_path is None:
        return None
    with open(meta_path) as fh:
        return json.load(fh)
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def load_adf(page_id, pages_dir):
    """Load a page's saved ADF document, or None when absent."""
    adf_path = _find_page_file(page_id, pages_dir, '.json')
    if adf_path is None:
        return None
    with open(adf_path) as fh:
        return json.load(fh)
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
# ---------------------------------------------------------------------------
|
|
154
|
+
# Commands
|
|
155
|
+
# ---------------------------------------------------------------------------
|
|
156
|
+
|
|
157
|
+
def cmd_get(args):
    """Download one page's ADF and metadata into args.dir."""
    session, base = setup()
    page = get_page(session, base, args.page_id)
    # Resolve the space key so files land in pages/<KEY>/.
    space = get_space(session, base, space_id=page['spaceId'])
    key = space.get('key', str(page['spaceId']))
    adf_path, _meta_path = save_page(page, key, args.dir)
    emit('OK', f'{page["title"]} (v{_ver(page)}) -> {adf_path}')
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def cmd_put(args):
    """Upload a locally edited ADF document back to Confluence.

    Requires both the local metadata (.meta.json) and the local ADF
    (.json) for the page. Refuses to upload when the locally cached
    version number differs from the remote one (someone edited the page
    since the last sync), unless --force is given. Exits 1 on any of
    these failures.
    """
    session, base = setup()

    meta = load_meta(args.page_id, args.dir)
    if not meta:
        emit_error(f'No local metadata for page {args.page_id}')
        sys.exit(1)
    adf = load_adf(args.page_id, args.dir)
    if not adf:
        emit_error(f'No local ADF for page {args.page_id}')
        sys.exit(1)

    # Fetch the live page first so the conflict check uses the current
    # remote version, not the cached one.
    remote = get_page(session, base, args.page_id)
    remote_ver = _ver(remote)
    local_ver = meta.get('version', 0)

    if not args.force and remote_ver != local_ver:
        emit_error(f'Version conflict: local v{local_ver}, remote v{remote_ver}. Use --force to overwrite.')
        sys.exit(1)

    new_version = remote_ver + 1
    # The v2 API expects the ADF body serialized as a JSON string.
    result = api_put(session, base, f'{V2}/pages/{args.page_id}', {
        'id': str(args.page_id),
        'status': 'current',
        'title': meta['title'],
        'body': {
            'representation': 'atlas_doc_format',
            'value': json.dumps(adf),
        },
        'version': {
            'number': new_version,
            'message': 'Updated via confluence CLI',
        },
    })

    # Persist the bumped version locally so the next put does not see a
    # spurious conflict.
    meta['version'] = new_version
    meta['updatedAt'] = _ver_ts(result)
    space_key = meta.get('spaceKey', '')
    meta_path = os.path.join(args.dir, space_key, f'{args.page_id}.meta.json')
    with open(meta_path, 'w') as f:
        json.dump(meta, f, indent=2)

    emit('OK', f'{meta["title"]} updated to v{new_version}')
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def cmd_diff(args):
    """Print a unified diff between the local and remote ADF of a page."""
    session, base = setup()

    local_adf = load_adf(args.page_id, args.dir)
    if not local_adf:
        emit_error(f'No local ADF for page {args.page_id}')
        sys.exit(1)

    remote_page = get_page(session, base, args.page_id)
    remote_adf = remote_page.get('body', {}).get('atlas_doc_format', {}).get('value', {})

    def as_lines(doc):
        # Canonical JSON (sorted keys) so key ordering never shows as a diff.
        return json.dumps(doc, indent=2, sort_keys=True).splitlines(keepends=True)

    delta = list(difflib.unified_diff(
        as_lines(local_adf), as_lines(remote_adf),
        fromfile=f'local/{args.page_id}.json',
        tofile=f'remote/{args.page_id}',
    ))
    if delta:
        sys.stdout.writelines(delta)
    else:
        meta = load_meta(args.page_id, args.dir) or {}
        emit('OK', f'No differences — {meta.get("title", args.page_id)}')
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def cmd_sync(args):
    """Bulk-download every page in a space.

    Pages whose cached local version is already >= the remote version are
    skipped unless --force is given. Downloads run in a thread pool of
    args.workers threads.
    """
    session, base = setup()
    space = get_space(session, base, key=args.space_key)
    space_id = space['id']
    space_key = space.get('key', args.space_key)

    print(f'Listing pages in {space_key}…', file=sys.stderr)
    pages = list_pages(session, base, space_id)
    print(f'Found {len(pages)} pages', file=sys.stderr)

    to_fetch = []
    skipped = 0
    for page in pages:
        if not args.force:
            meta = load_meta(page['id'], args.dir)
            if meta and meta.get('version', 0) >= _ver(page):
                skipped += 1
                continue
        to_fetch.append(page)

    if skipped:
        print(f'SKIP {skipped} pages already up-to-date', file=sys.stderr)

    if not to_fetch:
        emit('DONE', f'{space_key}: {len(pages)} pages, all up-to-date')
        return

    print(f'Fetching {len(to_fetch)} pages ({args.workers} workers)…', file=sys.stderr)

    def fetch_one(page):
        # Fetch and save one page; returns (ok, log_line) so the caller
        # can tally failures without sharing mutable state across threads.
        page_id = page['id']
        try:
            full_page = get_page(session, base, page_id)
            save_page(full_page, space_key, args.dir)
            return True, f'GET {page_id} {full_page.get("title", "")} (v{_ver(full_page)})'
        except Exception as e:
            return False, f'ERR {page_id} {page.get("title", "")}: {e}'

    # FIX: the previous version incremented a `nonlocal errors` counter
    # from inside worker threads; `+=` is not atomic, so counts could be
    # lost. Count in the main thread as results arrive instead.
    errors = 0
    with ThreadPoolExecutor(max_workers=args.workers) as pool:
        futures = {pool.submit(fetch_one, p): p for p in to_fetch}
        for future in as_completed(futures):
            ok, line = future.result()
            if not ok:
                errors += 1
            print(line)

    emit('DONE', f'{space_key}: {len(to_fetch)} fetched, {skipped} skipped, {errors} errors')
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def cmd_search(args):
    """Search the local page index by title substring or ID."""
    if not os.path.isfile(args.index):
        emit_error(f'Index not found: {args.index}')
        sys.exit(1)

    with open(args.index) as fh:
        index = json.load(fh)

    needle = args.query.lower()

    # The index is either {space_key: [pages]} or already a flat list.
    if isinstance(index, dict):
        flat = []
        for space_key, entries in index.items():
            for entry in entries:
                entry.setdefault('spaceKey', space_key)
                flat.append(entry)
    else:
        flat = index

    matches = [entry for entry in flat
               if needle in entry.get('title', '').lower()
               or needle in str(entry.get('id', ''))]

    for entry in matches:
        print(f'{entry["id"]} [{entry.get("spaceKey", "?")}] {entry.get("title", "")}')

    if not matches:
        print('No results.', file=sys.stderr)
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def cmd_index(args):
    """Rebuild the local page index for one or more spaces."""
    session, base = setup()
    spaces = args.space if args.space else ['POL', 'COMPLY']
    index = {}

    for space_key in spaces:
        space = get_space(session, base, key=space_key)
        print(f'Indexing {space_key}…', file=sys.stderr)
        pages = list_pages(session, base, space['id'])

        index[space_key] = [{
            'id': page['id'],
            'title': page.get('title', ''),
            'parentId': page.get('parentId', ''),
            'version': _ver(page),
            'updatedAt': _ver_ts(page),
        } for page in pages]
        print(f' {space_key}: {len(pages)} pages', file=sys.stderr)

    with open(args.output, 'w') as f:
        json.dump(index, f, indent=2)

    total = sum(len(entries) for entries in index.values())
    emit('DONE', f'{total} pages indexed -> {args.output}')
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
# ---------------------------------------------------------------------------
|
|
350
|
+
# CLI
|
|
351
|
+
# ---------------------------------------------------------------------------
|
|
352
|
+
|
|
353
|
+
def main():
    """CLI entry point: build the argument parser and dispatch.

    Each subcommand registers its handler via set_defaults(func=...);
    after parsing we simply call args.func(args). APIError is the only
    expected failure mode and maps to exit code 1; Ctrl-C maps to the
    conventional 130.
    """
    parser = argparse.ArgumentParser(
        prog='confluence',
        description='Confluence Cloud CLI — fast ADF page management',
    )
    parser.add_argument('--json', action='store_true', dest='json_output',
                        help='Output as JSON for programmatic parsing')
    sub = parser.add_subparsers(dest='command', required=True)

    p = sub.add_parser('get', help='Download a page (ADF + metadata)')
    p.add_argument('page_id', help='Confluence page ID')
    p.add_argument('--dir', default='pages', help='Output directory (default: pages)')
    p.set_defaults(func=cmd_get)

    p = sub.add_parser('put', help='Upload local ADF to Confluence')
    p.add_argument('page_id', help='Confluence page ID')
    p.add_argument('--dir', default='pages', help='Pages directory (default: pages)')
    p.add_argument('--force', action='store_true', help='Skip version conflict check')
    p.set_defaults(func=cmd_put)

    p = sub.add_parser('diff', help='Compare local vs remote ADF')
    p.add_argument('page_id', help='Confluence page ID')
    p.add_argument('--dir', default='pages', help='Pages directory (default: pages)')
    p.set_defaults(func=cmd_diff)

    p = sub.add_parser('sync', help='Bulk-download all pages in a space')
    p.add_argument('space_key', help='Space key (e.g. POL, COMPLY)')
    p.add_argument('--dir', default='pages', help='Output directory (default: pages)')
    p.add_argument('--workers', type=int, default=10, help='Parallel workers (default: 10)')
    p.add_argument('--force', action='store_true', help='Re-download all, ignore cache')
    p.set_defaults(func=cmd_sync)

    p = sub.add_parser('search', help='Search local page index')
    p.add_argument('query', help='Search term (title or ID)')
    p.add_argument('--index', default='page-index.json', help='Index file path')
    p.set_defaults(func=cmd_search)

    p = sub.add_parser('index', help='Rebuild page-index.json from API')
    p.add_argument('--space', action='append', help='Space key(s) to index (default: POL COMPLY)')
    p.add_argument('--output', default='page-index.json', help='Output file (default: page-index.json)')
    p.set_defaults(func=cmd_index)

    args = parser.parse_args()
    if args.json_output:
        set_json_mode(True)
    try:
        args.func(args)
    except APIError as e:
        emit_error(str(e))
        sys.exit(1)
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT, the conventional exit code for Ctrl-C.
        sys.exit(130)
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
# Allow running this module directly (also exposed via entry_points).
if __name__ == '__main__':
    main()
|
atlassian_cli/http.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Shared HTTP helpers for API calls in the atlassian_cli package."""
|
|
2
|
+
|
|
3
|
+
class APIError(Exception):
    """HTTP-level failure from an Atlassian REST call.

    Carries the response status code and the raw body text.
    """

    def __init__(self, status, body):
        self.status = status  # HTTP status code of the failed response
        self.body = body      # raw response body text

    def __str__(self):
        # Truncate so huge HTML error pages stay readable in logs.
        return f'HTTP {self.status}: {self.body[:200]}'
|
|
10
|
+
|
|
11
|
+
def api_get(session, base, path, **params):
    """GET base+path; return decoded JSON or raise APIError on failure."""
    resp = session.get(f'{base}{path}', params=params or None)
    if not resp.ok:
        raise APIError(resp.status_code, resp.text)
    return resp.json()
|
|
16
|
+
|
|
17
|
+
def api_post(session, base, path, data):
    """POST JSON to base+path; return decoded JSON or raise APIError."""
    resp = session.post(f'{base}{path}', json=data)
    if not resp.ok:
        raise APIError(resp.status_code, resp.text)
    return resp.json()
|
|
22
|
+
|
|
23
|
+
def api_put(session, base, path, data):
    """PUT JSON to base+path; return decoded JSON or raise APIError."""
    resp = session.put(f'{base}{path}', json=data)
    if not resp.ok:
        raise APIError(resp.status_code, resp.text)
    return resp.json()
|
|
28
|
+
|
|
29
|
+
def api_delete(session, base, path):
    """DELETE base+path; 204 yields None, any other 2xx yields JSON."""
    resp = session.delete(f'{base}{path}')
    if resp.status_code == 204:
        # No Content — nothing to decode.
        return None
    if resp.ok:
        return resp.json()
    raise APIError(resp.status_code, resp.text)
|
atlassian_cli/jira.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Jira CLI — issue and assets management via REST APIs.
|
|
3
|
+
|
|
4
|
+
Commands:
|
|
5
|
+
issue Jira issue operations (CRUD, search, transitions)
|
|
6
|
+
assets Jira Assets/JSM operations (CRUD, schemas, types)
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import argparse
|
|
10
|
+
import sys
|
|
11
|
+
|
|
12
|
+
from atlassian_cli import jira_assets, jira_issues
|
|
13
|
+
from atlassian_cli.http import APIError
|
|
14
|
+
from atlassian_cli.output import emit_error, set_json_mode
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def main():
    """CLI entry point for the `jira` command.

    Two domains — `issue` (Jira platform REST) and `assets` (JSM Assets
    REST) — each with their own subcommands. Handlers live in
    jira_issues / jira_assets and are attached via set_defaults(func=...).
    APIError maps to exit code 1; Ctrl-C maps to 130.
    """
    parser = argparse.ArgumentParser(
        prog='jira',
        description='Jira CLI — issue and assets management',
    )
    parser.add_argument('--json', action='store_true', dest='json_output',
                        help='Output as JSON for programmatic parsing')

    sub = parser.add_subparsers(dest='domain', required=True)

    # -----------------------------------------------------------------------
    # issue subcommand
    # -----------------------------------------------------------------------
    issue_parser = sub.add_parser('issue', help='Jira issue operations')
    issue_sub = issue_parser.add_subparsers(dest='command', required=True)

    p = issue_sub.add_parser('get', help='Get issue details')
    p.add_argument('key', help='Issue key (e.g. PROJ-123)')
    p.set_defaults(func=jira_issues.cmd_get)

    p = issue_sub.add_parser('create', help='Create an issue')
    p.add_argument('project', help='Project key (e.g. PROJ)')
    p.add_argument('type', help='Issue type (e.g. Task, Bug, Story)')
    p.add_argument('summary', help='Issue summary/title')
    p.add_argument('--description', help='Issue description')
    p.add_argument('--labels', nargs='*', help='Labels')
    p.add_argument('--assignee', help='Assignee account ID')
    p.add_argument('--parent', help='Parent issue key (for sub-tasks)')
    p.set_defaults(func=jira_issues.cmd_create)

    p = issue_sub.add_parser('update', help='Update issue fields')
    p.add_argument('key', help='Issue key')
    p.add_argument('--summary', help='New summary')
    p.add_argument('--description', help='New description')
    p.add_argument('--labels', nargs='*', default=None, help='Replace labels')
    p.add_argument('--assignee', help='Assignee account ID')
    p.add_argument('--fields', help='JSON string of additional fields')
    p.set_defaults(func=jira_issues.cmd_update)

    p = issue_sub.add_parser('delete', help='Delete an issue')
    p.add_argument('key', help='Issue key')
    p.set_defaults(func=jira_issues.cmd_delete)

    p = issue_sub.add_parser('search', help='Search issues with JQL')
    p.add_argument('jql', help='JQL query string')
    p.add_argument('--max', type=int, default=50, help='Max results (default: 50)')
    p.add_argument('--fields', default='summary,status,assignee,issuetype',
                   help='Comma-separated fields to return')
    p.set_defaults(func=jira_issues.cmd_search)

    p = issue_sub.add_parser('transition', help='Transition issue to new status')
    p.add_argument('key', help='Issue key')
    p.add_argument('status', help='Target status name')
    p.set_defaults(func=jira_issues.cmd_transition)

    p = issue_sub.add_parser('comment', help='Add a comment')
    p.add_argument('key', help='Issue key')
    p.add_argument('body', help='Comment text')
    p.set_defaults(func=jira_issues.cmd_comment)

    p = issue_sub.add_parser('comments', help='List comments')
    p.add_argument('key', help='Issue key')
    p.set_defaults(func=jira_issues.cmd_comments)

    # -----------------------------------------------------------------------
    # assets subcommand
    # -----------------------------------------------------------------------
    assets_parser = sub.add_parser('assets', help='Jira Assets (JSM) operations')
    assets_sub = assets_parser.add_subparsers(dest='command', required=True)

    p = assets_sub.add_parser('search', help='Search objects with AQL')
    p.add_argument('aql', help='AQL query string')
    p.add_argument('--max', type=int, default=50, help='Max results')
    p.set_defaults(func=jira_assets.cmd_search)

    p = assets_sub.add_parser('get', help='Get object by ID')
    p.add_argument('id', help='Object ID')
    p.set_defaults(func=jira_assets.cmd_get)

    p = assets_sub.add_parser('create', help='Create an object')
    p.add_argument('type_id', help='Object type ID')
    p.add_argument('attrs', nargs='+', help='Attributes as key=value pairs')
    p.set_defaults(func=jira_assets.cmd_create)

    p = assets_sub.add_parser('update', help='Update an object')
    p.add_argument('id', help='Object ID')
    p.add_argument('attrs', nargs='+', help='Attributes as key=value pairs')
    p.set_defaults(func=jira_assets.cmd_update)

    p = assets_sub.add_parser('delete', help='Delete an object')
    p.add_argument('id', help='Object ID')
    p.set_defaults(func=jira_assets.cmd_delete)

    p = assets_sub.add_parser('schemas', help='List object schemas')
    p.set_defaults(func=jira_assets.cmd_schemas)

    p = assets_sub.add_parser('schema', help='Get schema details')
    p.add_argument('id', help='Schema ID')
    p.set_defaults(func=jira_assets.cmd_schema)

    p = assets_sub.add_parser('types', help='List object types in a schema')
    p.add_argument('schema_id', help='Schema ID')
    p.set_defaults(func=jira_assets.cmd_types)

    p = assets_sub.add_parser('type', help='Get object type details')
    p.add_argument('id', help='Object type ID')
    p.set_defaults(func=jira_assets.cmd_type)

    p = assets_sub.add_parser('type-create', help='Create object type')
    p.add_argument('schema_id', help='Schema ID')
    p.add_argument('name', help='Type name')
    p.add_argument('--description', help='Type description')
    p.add_argument('--parent-type-id', help='Parent object type ID')
    p.set_defaults(func=jira_assets.cmd_type_create)

    p = assets_sub.add_parser('attrs', help='List attributes for a type')
    p.add_argument('type_id', help='Object type ID')
    p.set_defaults(func=jira_assets.cmd_attrs)

    args = parser.parse_args()
    if args.json_output:
        set_json_mode(True)
    try:
        args.func(args)
    except APIError as e:
        emit_error(str(e))
        sys.exit(1)
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT, the conventional exit code for Ctrl-C.
        sys.exit(130)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# Allow running this module directly (also exposed via entry_points).
if __name__ == '__main__':
    main()
|
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
"""Jira Assets (JSM) commands — Assets REST API v1."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import sys
|
|
6
|
+
|
|
7
|
+
from atlassian_cli.config import setup
|
|
8
|
+
from atlassian_cli.http import APIError, api_delete, api_get, api_post, api_put
|
|
9
|
+
from atlassian_cli.output import emit, emit_error, emit_json
|
|
10
|
+
|
|
11
|
+
CACHE_FILE = '.atlassian-cache.json'
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def _discover(session, base):
    """Discover (cloudId, workspaceId) for this site, caching to disk.

    The cache file is keyed by base URL. A corrupt, unreadable, or
    incomplete cache (previously a crash via JSONDecodeError/KeyError)
    is now ignored and rebuilt. Raises APIError when either discovery
    endpoint fails or the site has no Assets workspace.
    """
    cache_path = os.path.join(os.getcwd(), CACHE_FILE)
    if os.path.isfile(cache_path):
        try:
            with open(cache_path) as f:
                cache = json.load(f)
            if cache.get('base') == base:
                return cache['cloud_id'], cache['workspace_id']
        except (OSError, json.JSONDecodeError, KeyError):
            # Stale or corrupt cache — fall through and re-discover.
            pass

    # Step 1: cloudId from the edge tenant-info endpoint.
    resp = session.get(f'{base}/_edge/tenant_info')
    if not resp.ok:
        raise APIError(resp.status_code, f'Failed to get cloudId: {resp.text}')
    cloud_id = resp.json()['cloudId']

    # Step 2: workspaceId from the service desk assets endpoint.
    resp = session.get(f'{base}/rest/servicedeskapi/assets/workspace')
    if not resp.ok:
        raise APIError(resp.status_code, f'Failed to get workspaceId: {resp.text}')
    values = resp.json().get('values') or []
    if not values:
        # Previously an opaque IndexError; report it as an API failure.
        raise APIError(resp.status_code, 'No Assets workspace available for this site')
    workspace_id = values[0]['workspaceId']

    # Cache so subsequent invocations skip both round-trips.
    with open(cache_path, 'w') as f:
        json.dump({'base': base, 'cloud_id': cloud_id, 'workspace_id': workspace_id}, f)

    return cloud_id, workspace_id
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _assets_base(session, base):
    """Return the api.atlassian.com Assets base URL for this workspace."""
    workspace_id = _discover(session, base)[1]
    return f'https://api.atlassian.com/jsm/assets/workspace/{workspace_id}/v1'
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def assets_setup():
    """Return (session, site_base, assets_base_url) for Assets commands."""
    session, site_base = setup()
    return session, site_base, _assets_base(session, site_base)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _parse_attrs(attr_list):
|
|
56
|
+
"""Parse key=value pairs into Assets API attribute format."""
|
|
57
|
+
attrs = []
|
|
58
|
+
for pair in attr_list:
|
|
59
|
+
if '=' not in pair:
|
|
60
|
+
emit_error(f'Invalid attribute format: {pair} (expected key=value)')
|
|
61
|
+
sys.exit(1)
|
|
62
|
+
key, value = pair.split('=', 1)
|
|
63
|
+
attrs.append({
|
|
64
|
+
'objectTypeAttributeId': key,
|
|
65
|
+
'objectAttributeValues': [{'value': value}],
|
|
66
|
+
})
|
|
67
|
+
return attrs
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# Note: Assets API uses a different base URL (api.atlassian.com), so we pass
|
|
71
|
+
# the full assets_base as 'base' and use relative paths.
|
|
72
|
+
|
|
73
|
+
def cmd_search(args):
    """Run an AQL query and print one line per matching object."""
    session, _, ab = assets_setup()
    payload = {
        'qlQuery': args.aql,
        'resultPerPage': args.max,
        'includeAttributes': True,
    }
    data = api_post(session, ab, '/object/aql', payload)
    # Newer API versions return 'values'; older ones 'objectEntries'.
    objects = data.get('values', data.get('objectEntries', []))
    for obj in objects:
        type_name = obj.get('objectType', {}).get('name', '?')
        print(f'{obj["id"]} [{type_name}] {obj.get("label", "")}')
    emit('DONE', f'{len(objects)} objects found')
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def cmd_get(args):
    """Fetch a single Assets object by id and dump it as JSON."""
    session, _, ab = assets_setup()
    emit_json(api_get(session, ab, f'/object/{args.id}'))
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def cmd_create(args):
    """Create an Assets object of the given type from key=value attributes."""
    session, _, ab = assets_setup()
    body = {
        'objectTypeId': args.type_id,
        'attributes': _parse_attrs(args.attrs),
    }
    created = api_post(session, ab, '/object/create', body)
    emit('OK', f'Created object {created.get("id", "")} ({created.get("label", "")})')
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def cmd_update(args):
    """Set attributes on an existing Assets object."""
    session, _, ab = assets_setup()
    body = {'attributes': _parse_attrs(args.attrs)}
    api_put(session, ab, f'/object/{args.id}', body)
    emit('OK', f'Updated object {args.id}')
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def cmd_delete(args):
    """Delete an Assets object by id."""
    session, _, ab = assets_setup()
    path = f'/object/{args.id}'
    api_delete(session, ab, path)
    emit('OK', f'Deleted object {args.id}')
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def cmd_schemas(args):
    """List every object schema in the workspace, one per line."""
    session, _, ab = assets_setup()
    data = api_get(session, ab, '/objectschema/list')
    # API variants key the list as 'values' or 'objectschemas'.
    schemas = data.get('values', data.get('objectschemas', []))
    for schema in schemas:
        print(f'{schema["id"]} {schema["name"]}')
    emit('DONE', f'{len(schemas)} schemas')
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def cmd_schema(args):
    """Dump one object schema as JSON."""
    session, _, ab = assets_setup()
    emit_json(api_get(session, ab, f'/objectschema/{args.id}'))
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def cmd_types(args):
    """List object types in a schema (flat listing, no hierarchy)."""
    session, _, ab = assets_setup()
    data = api_get(session, ab, f'/objectschema/{args.schema_id}/objecttypes/flat')
    # Response may be a bare list or a paged dict with a 'values' key.
    if isinstance(data, list):
        types = data
    else:
        types = data.get('values', [])
    for otype in types:
        print(f'{otype["id"]} {otype["name"]}')
    emit('DONE', f'{len(types)} types')
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def cmd_type(args):
    """Dump one object type as JSON."""
    session, _, ab = assets_setup()
    emit_json(api_get(session, ab, f'/objecttype/{args.id}'))
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def cmd_type_create(args):
    """Create a new object type in a schema, with optional description/parent."""
    session, _, ab = assets_setup()
    body = {'name': args.name, 'objectSchemaId': args.schema_id}
    # Optional flags may be absent from the namespace entirely, so use
    # getattr with a default rather than direct attribute access.
    description = getattr(args, 'description', None)
    if description:
        body['description'] = description
    parent = getattr(args, 'parent_type_id', None)
    if parent:
        body['parentObjectTypeId'] = parent
    created = api_post(session, ab, '/objecttype/create', body)
    emit('OK', f'Created type {created.get("id", "")} ({args.name})')
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def cmd_attrs(args):
    """List attributes of an object type, flagging required ones with '*'."""
    session, _, ab = assets_setup()
    data = api_get(session, ab, f'/objecttype/{args.type_id}/attributes')
    attrs = data if isinstance(data, list) else data.get('values', [])
    for attr in attrs:
        # minimumCardinality > 0 means the attribute is mandatory.
        required = '*' if attr.get('minimumCardinality', 0) > 0 else ''
        kind = attr.get('type', attr.get('defaultType', {}).get('name', '?'))
        if isinstance(kind, dict):
            # Some responses embed the type as an object with a 'name'.
            kind = kind.get('name', '?')
        print(f'{attr["id"]} {attr["name"]} ({kind}) {required}'.rstrip())
    emit('DONE', f'{len(attrs)} attributes')
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"""Jira Cloud issue commands — REST API v3."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
from atlassian_cli.config import setup
|
|
7
|
+
from atlassian_cli.http import APIError, api_delete, api_get, api_post
|
|
8
|
+
from atlassian_cli.output import emit, emit_error
|
|
9
|
+
|
|
10
|
+
V3 = '/rest/api/3'
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _extract_text(adf_body):
|
|
14
|
+
"""Recursively extract plain text from an ADF body."""
|
|
15
|
+
def extract(node):
|
|
16
|
+
if node.get('type') == 'text':
|
|
17
|
+
return [node.get('text', '')]
|
|
18
|
+
parts = []
|
|
19
|
+
for child in node.get('content', []):
|
|
20
|
+
parts.extend(extract(child))
|
|
21
|
+
return parts
|
|
22
|
+
return ' '.join(extract(adf_body))
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _text_adf(text):
|
|
26
|
+
"""Wrap plain text in minimal ADF document."""
|
|
27
|
+
return {
|
|
28
|
+
'type': 'doc', 'version': 1,
|
|
29
|
+
'content': [{'type': 'paragraph', 'content': [{'type': 'text', 'text': text}]}],
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def cmd_get(args):
    """Print a one-line issue summary: KEY [status] summary."""
    session, base = setup()
    data = api_get(session, base, f'{V3}/issue/{args.key}')
    flds = data.get('fields', {})
    status_name = flds.get('status', {}).get('name', '?')
    emit('OK', f'{data["key"]} [{status_name}] {flds.get("summary", "")}',
         data={'key': data['key'], 'id': data['id']})
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def cmd_create(args):
    """Create a Jira issue.

    Required: project key, issue type name, summary. Optional flags add
    description (as ADF), labels, assignee (account id), and parent key.
    """
    session, base = setup()
    fields = {
        'project': {'key': args.project},
        'issuetype': {'name': args.type},
        'summary': args.summary,
    }
    # Optional fields are added only when supplied on the command line.
    optional = {
        'description': _text_adf(args.description) if args.description else None,
        'labels': args.labels if args.labels else None,
        'assignee': {'accountId': args.assignee} if args.assignee else None,
        'parent': {'key': args.parent} if args.parent else None,
    }
    fields.update({k: v for k, v in optional.items() if v is not None})

    result = api_post(session, base, f'{V3}/issue', {'fields': fields})
    emit('OK', f'Created {result["key"]}', data=result)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def cmd_update(args):
    """Update one or more fields on an existing issue.

    Exits 1 when no update flags were given. Raw --fields JSON is merged
    last so it can override the convenience flags.
    """
    session, base = setup()
    changes = {}
    if args.summary:
        changes['summary'] = args.summary
    if args.description:
        changes['description'] = _text_adf(args.description)
    if args.labels is not None:
        # Explicit empty list clears labels, hence the is-not-None check.
        changes['labels'] = args.labels
    if args.assignee:
        changes['assignee'] = {'accountId': args.assignee}
    if args.fields:
        changes.update(json.loads(args.fields))

    if not changes:
        emit_error('No fields to update')
        sys.exit(1)

    # PUT /issue returns 204 No Content on success, so call the session
    # directly instead of a JSON-decoding helper.
    resp = session.put(f'{base}{V3}/issue/{args.key}', json={'fields': changes})
    if not resp.ok:
        raise APIError(resp.status_code, resp.text)
    emit('OK', f'Updated {args.key}')
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def cmd_delete(args):
    """Delete an issue by key."""
    session, base = setup()
    api_delete(session, base, f'{V3}/issue/{args.key}')
    emit('OK', f'Deleted {args.key}')
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def cmd_search(args):
    """Run a JQL search and print KEY [status] summary per issue."""
    session, base = setup()
    payload = {
        'jql': args.jql,
        'maxResults': args.max,
        'fields': args.fields.split(','),
    }
    data = api_post(session, base, f'{V3}/search/jql', payload)
    issues = data.get('issues', [])
    for issue in issues:
        flds = issue.get('fields', {})
        status_name = flds.get('status', {}).get('name', '?')
        print(f'{issue["key"]} [{status_name}] {flds.get("summary", "")}')
    emit('DONE', f'{len(issues)} issues found')
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def cmd_transition(args):
    """Move an issue to a new status by name.

    Matches args.status case-insensitively against either the transition
    name or its destination status name, then executes that transition.
    Exits 1 (listing the available transitions) when nothing matches.
    """
    session, base = setup()
    data = api_get(session, base, f'{V3}/issue/{args.key}/transitions')
    transitions = data.get('transitions', [])

    wanted = args.status.lower()
    target = None
    for t in transitions:
        if (t['name'].lower() == wanted
                or t.get('to', {}).get('name', '').lower() == wanted):
            target = t
            break

    if not target:
        available = ', '.join(t['name'] for t in transitions)
        emit_error(f'No transition to "{args.status}". Available: {available}')
        sys.exit(1)

    api_post(session, base, f'{V3}/issue/{args.key}/transitions',
             {'transition': {'id': target['id']}})
    # Bug fix: the match above tolerates a missing 'to' entry, so the
    # success message must too — fall back to the transition name instead
    # of raising KeyError on target['to']['name'].
    dest = target.get('to', {}).get('name', target['name'])
    emit('OK', f'{args.key} -> {dest}')
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def cmd_comment(args):
    """Add a plain-text comment to an issue."""
    session, base = setup()
    payload = {'body': _text_adf(args.body)}
    api_post(session, base, f'{V3}/issue/{args.key}/comment', payload)
    emit('OK', f'Comment added to {args.key}')
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def cmd_comments(args):
    """List an issue's comments as 'author (date): first-100-chars'."""
    session, base = setup()
    data = api_get(session, base, f'{V3}/issue/{args.key}/comment')
    comments = data.get('comments', [])
    for comment in comments:
        who = comment.get('author', {}).get('displayName', '?')
        # Keep only 'YYYY-MM-DDTHH:MM' from the ISO timestamp.
        when = comment.get('created', '')[:16]
        snippet = _extract_text(comment.get('body', {}))[:100]
        print(f'{who} ({when}): {snippet}')
    emit('DONE', f'{len(comments)} comments')
|
atlassian_cli/output.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""Provides shared output formatting for atlassian_cli package with both text and JSON modes."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
_json_mode = False
|
|
7
|
+
|
|
8
|
+
def set_json_mode(enabled: bool):
    """Toggle the module-wide flag that switches emit/emit_error to JSON output."""
    global _json_mode
    _json_mode = enabled
|
|
11
|
+
|
|
12
|
+
def emit(prefix, message, data=None):
    """Print a status line; in JSON mode emit one JSON object instead.

    In JSON mode the prefix becomes a lowercase 'status' key and any extra
    *data* mapping is merged into the object.
    """
    if not _json_mode:
        print(f'{prefix} {message}')
        return
    payload = {'status': prefix.lower(), 'message': message}
    if data:
        payload.update(data)
    print(json.dumps(payload))
|
|
20
|
+
|
|
21
|
+
def emit_json(data):
    """Pretty-print *data* as indented JSON on stdout."""
    rendered = json.dumps(data, indent=2)
    print(rendered)
|
|
23
|
+
|
|
24
|
+
def emit_error(message):
    """Write an error line to stderr, honouring the module JSON mode."""
    if _json_mode:
        line = json.dumps({'status': 'error', 'message': message})
    else:
        line = f'ERR {message}'
    print(line, file=sys.stderr)
|
|
@@ -0,0 +1,235 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: atlassian-cli
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: Fast CLI tools for Atlassian Cloud (Confluence + Jira) — optimized for AI agents
|
|
5
|
+
Author-email: Alex Fishlock <alex.fishlock@catapult.cx>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/catapultcx/atlassian-cli
|
|
8
|
+
Project-URL: Repository, https://github.com/catapultcx/atlassian-cli
|
|
9
|
+
Project-URL: Issues, https://github.com/catapultcx/atlassian-cli/issues
|
|
10
|
+
Keywords: atlassian,confluence,jira,cli,claude,ai
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Environment :: Console
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
20
|
+
Classifier: Topic :: Utilities
|
|
21
|
+
Requires-Python: >=3.10
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
License-File: LICENSE
|
|
24
|
+
Requires-Dist: requests>=2.28.0
|
|
25
|
+
Provides-Extra: dev
|
|
26
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
27
|
+
Requires-Dist: responses>=0.23.0; extra == "dev"
|
|
28
|
+
Requires-Dist: ruff>=0.4.0; extra == "dev"
|
|
29
|
+
Dynamic: license-file
|
|
30
|
+
|
|
31
|
+
<h1 align="center">atlassian-cli</h1>
<p align="center">
Fast CLI tools for Atlassian Cloud — built for AI agents, loved by humans.
</p>
|
|
37
|
+
|
|
38
|
+
<p align="center">
|
|
39
|
+
<a href="https://github.com/catapultcx/atlassian-cli/actions/workflows/ci.yml"><img src="https://github.com/catapultcx/atlassian-cli/actions/workflows/ci.yml/badge.svg" alt="CI"></a>
|
|
40
|
+
<a href="https://pypi.org/project/atlassian-cli/"><img src="https://img.shields.io/pypi/v/atlassian-cli" alt="PyPI"></a>
|
|
41
|
+
<a href="https://pypi.org/project/atlassian-cli/"><img src="https://img.shields.io/pypi/pyversions/atlassian-cli" alt="Python"></a>
|
|
42
|
+
<a href="https://github.com/catapultcx/atlassian-cli/blob/main/LICENSE"><img src="https://img.shields.io/github/license/catapultcx/atlassian-cli" alt="License"></a>
|
|
43
|
+
</p>
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
Two CLI tools — `confluence` and `jira` — that talk directly to Atlassian Cloud REST APIs. Zero bloat, one dependency (`requests`), deterministic output that AI agents parse in a single shot.
|
|
48
|
+
|
|
49
|
+
## Install
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
pip install atlassian-cli
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
Or from source:
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
pip install git+https://github.com/catapultcx/atlassian-cli.git
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Setup
|
|
62
|
+
|
|
63
|
+
Create a `.env` file (or export environment variables):
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
ATLASSIAN_URL=https://your-site.atlassian.net
|
|
67
|
+
ATLASSIAN_EMAIL=you@example.com
|
|
68
|
+
ATLASSIAN_TOKEN=your-api-token
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
Get your API token at https://id.atlassian.com/manage-profile/security/api-tokens
|
|
72
|
+
|
|
73
|
+
> Legacy `CONFLUENCE_URL` / `CONFLUENCE_EMAIL` / `CONFLUENCE_TOKEN` env vars are also supported.
|
|
74
|
+
|
|
75
|
+
## Confluence CLI
|
|
76
|
+
|
|
77
|
+
Manages Confluence pages as local JSON files in ADF (Atlassian Document Format). No markdown — ADF preserves every macro, panel, and table perfectly.
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
# Download a page
|
|
81
|
+
confluence get 9268920323
|
|
82
|
+
|
|
83
|
+
# Upload local edits back
|
|
84
|
+
confluence put 9268920323
|
|
85
|
+
confluence put 9268920323 --force # skip version check
|
|
86
|
+
|
|
87
|
+
# Compare local vs remote
|
|
88
|
+
confluence diff 9268920323
|
|
89
|
+
|
|
90
|
+
# Bulk-download an entire space (parallel, version-cached)
|
|
91
|
+
confluence sync POL
|
|
92
|
+
confluence sync COMPLY --workers 20 --force
|
|
93
|
+
|
|
94
|
+
# Search local page index (instant, no API call)
|
|
95
|
+
confluence search "risk assessment"
|
|
96
|
+
|
|
97
|
+
# Rebuild the page index
|
|
98
|
+
confluence index
|
|
99
|
+
confluence index --space POL --space COMPLY
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### How sync works
|
|
103
|
+
|
|
104
|
+
`sync` downloads every page in a space using parallel workers. It caches version numbers locally — subsequent syncs only fetch pages that changed. A full space of 500+ pages takes seconds.
|
|
105
|
+
|
|
106
|
+
```
|
|
107
|
+
pages/
|
|
108
|
+
POL/
|
|
109
|
+
9268920323.json # ADF body
|
|
110
|
+
9268920323.meta.json # title, version, timestamps
|
|
111
|
+
COMPLY/
|
|
112
|
+
5227515611.json
|
|
113
|
+
5227515611.meta.json
|
|
114
|
+
page-index.json # searchable index
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
## Jira CLI
|
|
118
|
+
|
|
119
|
+
### Issues
|
|
120
|
+
|
|
121
|
+
Full CRUD on Jira issues via REST API v3.
|
|
122
|
+
|
|
123
|
+
```bash
|
|
124
|
+
# Get issue details
|
|
125
|
+
jira issue get ISMS-42
|
|
126
|
+
|
|
127
|
+
# Create issues
|
|
128
|
+
jira issue create PROJ Task "Fix the login bug"
|
|
129
|
+
jira issue create PROJ Story "User auth" --description "As a user..." --labels security urgent
|
|
130
|
+
jira issue create PROJ Sub-task "Write tests" --parent PROJ-100
|
|
131
|
+
|
|
132
|
+
# Update fields
|
|
133
|
+
jira issue update ISMS-42 --summary "New title"
|
|
134
|
+
jira issue update ISMS-42 --labels risk compliance
|
|
135
|
+
jira issue update ISMS-42 --fields '{"priority": {"name": "High"}}'
|
|
136
|
+
|
|
137
|
+
# Delete
|
|
138
|
+
jira issue delete ISMS-42
|
|
139
|
+
|
|
140
|
+
# Search with JQL
|
|
141
|
+
jira issue search "project = ISMS AND status = Open"
|
|
142
|
+
jira issue search "assignee = currentUser() ORDER BY updated DESC" --max 20
|
|
143
|
+
|
|
144
|
+
# Transitions
|
|
145
|
+
jira issue transition ISMS-42 "In Progress"
|
|
146
|
+
jira issue transition ISMS-42 Done
|
|
147
|
+
|
|
148
|
+
# Comments
|
|
149
|
+
jira issue comment ISMS-42 "Fixed in v2.1"
|
|
150
|
+
jira issue comments ISMS-42
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
### Assets (JSM)
|
|
154
|
+
|
|
155
|
+
CRUD for Jira Service Management Assets via the Assets REST API v1.
|
|
156
|
+
|
|
157
|
+
```bash
|
|
158
|
+
# Browse schemas and types
|
|
159
|
+
jira assets schemas
|
|
160
|
+
jira assets schema 1
|
|
161
|
+
jira assets types 1
|
|
162
|
+
jira assets type 5
|
|
163
|
+
jira assets attrs 5
|
|
164
|
+
|
|
165
|
+
# Search with AQL
|
|
166
|
+
jira assets search "objectType = Server"
|
|
167
|
+
|
|
168
|
+
# CRUD objects
|
|
169
|
+
jira assets get 123
|
|
170
|
+
jira assets create 5 Name=srv01 IP=10.0.0.1
|
|
171
|
+
jira assets update 123 Name=srv02
|
|
172
|
+
jira assets delete 123
|
|
173
|
+
|
|
174
|
+
# Create new object types
|
|
175
|
+
jira assets type-create 1 "Network Device" --description "Switches and routers"
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
## `--json` flag
|
|
179
|
+
|
|
180
|
+
Both CLIs accept a global `--json` flag that switches all output to machine-readable JSON. Perfect for piping into `jq` or parsing from code.
|
|
181
|
+
|
|
182
|
+
```bash
|
|
183
|
+
# Text mode (default)
|
|
184
|
+
$ confluence get 9268920323
|
|
185
|
+
OK Artificial Intelligence Policy (v12) -> pages/POL/9268920323.json
|
|
186
|
+
|
|
187
|
+
# JSON mode
|
|
188
|
+
$ confluence --json get 9268920323
|
|
189
|
+
{"status":"ok","message":"Artificial Intelligence Policy (v12) -> pages/POL/9268920323.json"}
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
## Output format
|
|
193
|
+
|
|
194
|
+
All commands emit status-prefixed lines for easy parsing:
|
|
195
|
+
|
|
196
|
+
| Prefix | Meaning |
|
|
197
|
+
|--------|---------|
|
|
198
|
+
| `OK` | Success |
|
|
199
|
+
| `GET` | Page downloaded |
|
|
200
|
+
| `SKIP` | Already up-to-date |
|
|
201
|
+
| `ERR` | Error |
|
|
202
|
+
| `DONE` | Batch complete |
|
|
203
|
+
|
|
204
|
+
## Architecture
|
|
205
|
+
|
|
206
|
+
```
|
|
207
|
+
src/atlassian_cli/
|
|
208
|
+
config.py Shared auth, .env parsing, session factory
|
|
209
|
+
http.py API helpers: get/post/put/delete + error handling
|
|
210
|
+
output.py Text & JSON output formatting
|
|
211
|
+
conflu.py Confluence CLI (v2 API, ADF)
|
|
212
|
+
jira.py Jira CLI entry point (subparsers)
|
|
213
|
+
jira_issues.py Jira issue commands (v3 API)
|
|
214
|
+
jira_assets.py Jira Assets commands (Assets v1 API)
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
**APIs used:**
|
|
218
|
+
- Confluence Cloud REST API v2 (`/wiki/api/v2/`)
|
|
219
|
+
- Jira Cloud REST API v3 (`/rest/api/3/`)
|
|
220
|
+
- Jira Assets REST API v1 (`api.atlassian.com/jsm/assets/workspace/{id}/v1`)
|
|
221
|
+
|
|
222
|
+
## Development
|
|
223
|
+
|
|
224
|
+
```bash
|
|
225
|
+
git clone https://github.com/catapultcx/atlassian-cli.git
|
|
226
|
+
cd atlassian-cli
|
|
227
|
+
python -m venv .venv && source .venv/bin/activate
|
|
228
|
+
pip install -e ".[dev]"
|
|
229
|
+
pytest
|
|
230
|
+
ruff check src/ tests/
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
## License
|
|
234
|
+
|
|
235
|
+
MIT
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
atlassian_cli/__init__.py,sha256=hEMtjMFFOG0MdzHtBqJD1Afot-rMkCFG1IfGTuXQ6jk,80
|
|
2
|
+
atlassian_cli/config.py,sha256=RrKrhkIQrKYIveyDbNcK6hB7v85dJl5rXFTpZQDIrpU,2029
|
|
3
|
+
atlassian_cli/conflu.py,sha256=A6PyWDmMobifFrIRwN2T9IZoM6Xl383epCFPAnhsexU,13422
|
|
4
|
+
atlassian_cli/http.py,sha256=eKNR4MtNvO1I04_9SkIWSST7E1ql8mc8_OiYKABQ9_k,1147
|
|
5
|
+
atlassian_cli/jira.py,sha256=GMdRTaJQxK0dGAUe1xqzkCzJBacAKNRXdviAs_D1iLg,6233
|
|
6
|
+
atlassian_cli/jira_assets.py,sha256=sx9FbUjlhdzJeW_0aTh8epARVzwME65TUlLLw-GEFPY,5472
|
|
7
|
+
atlassian_cli/jira_issues.py,sha256=C9K4-TJGvLdyK2VycJz1Q1BlmCTxpeYPPfwI6MnHirk,4589
|
|
8
|
+
atlassian_cli/output.py,sha256=xC5e6L4UsnwMgqRHOv61ykr_ko8nS2k-FCVbESH2h8s,738
|
|
9
|
+
atlassian_cli-0.3.0.dist-info/licenses/LICENSE,sha256=N2En73-GfhLh4N3JFemhxauOoYUoc0QLC1loMpwDnCg,1068
|
|
10
|
+
atlassian_cli-0.3.0.dist-info/METADATA,sha256=wctTx1cyLqUZLgwNt8Hoh7nE8Ai3zas7mDo4nj2jLag,6840
|
|
11
|
+
atlassian_cli-0.3.0.dist-info/WHEEL,sha256=YCfwYGOYMi5Jhw2fU4yNgwErybb2IX5PEwBKV4ZbdBo,91
|
|
12
|
+
atlassian_cli-0.3.0.dist-info/entry_points.txt,sha256=fzzWod768AQuQu6s4ZQLWDdK9FVBfmgd3UHJUT19tWY,88
|
|
13
|
+
atlassian_cli-0.3.0.dist-info/top_level.txt,sha256=QuHsUMDGmyPIe55UShDMrvDeOuMVIkBQvOx4lDYavJk,14
|
|
14
|
+
atlassian_cli-0.3.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Catapult CX
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
atlassian_cli
|