n2b 0.4.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,753 @@
1
require 'base64'
require 'json'
require 'net/http'
require 'time'
require 'uri'
5
+
6
+ module N2B
7
+ class JiraClient
8
+ # Placeholder for Jira API interaction error
9
+ class JiraApiError < StandardError; end
10
+
11
# Builds a Jira client from the N2B config hash.
#
# config - Hash; expects config['jira'] with 'domain', 'email' and
#          'api_key'. Raises ArgumentError when any of them is missing.
# Sets @base_url to "<scheme>://<domain>/rest/api/3".
def initialize(config)
  @config = config
  @jira_config = @config['jira'] || {} # Ensure jira key exists

  if %w[domain email api_key].any? { |key| !@jira_config[key] }
    raise ArgumentError, "Jira domain, email, and API key must be configured in N2B settings."
  end

  # The configured domain may or may not carry a protocol; default to https.
  domain = @jira_config['domain'].to_s.strip
  root = domain.match?(%r{\Ahttps?://}) ? domain : "https://#{domain}"
  @base_url = "#{root.chomp('/')}/rest/api/3"
end
28
+
29
# Fetches a Jira ticket plus its comments and returns the processed
# requirements text. On any JiraApiError the method logs the failure and
# falls back to locally generated dummy data (development aid).
#
# ticket_key_or_url - "PROJ-123" or a full Jira browse URL.
def fetch_ticket(ticket_key_or_url)
  domain, ticket_key = parse_ticket_input(ticket_key_or_url)
  raise JiraApiError, "Could not extract ticket key from '#{ticket_key_or_url}'." unless ticket_key

  puts "Fetching Jira ticket: #{ticket_key} from domain: #{domain || @jira_config['domain']}"
  puts "Fetching ticket comments for additional context..."

  begin
    # Ticket details and comments are two separate v3 endpoints.
    ticket_data = make_api_request('GET', "/rest/api/3/issue/#{ticket_key}")
    comments_data = make_api_request('GET', "/rest/api/3/issue/#{ticket_key}/comment")['comments'] || []

    puts "Successfully fetched ticket and #{comments_data.length} comments"
    process_ticket_data(ticket_data, comments_data)
  rescue JiraApiError => e
    puts "⚠️ Failed to fetch from Jira API: #{e.message}"
    puts "Falling back to dummy data for development..."
    fetch_dummy_ticket_data(ticket_key)
  end
end
60
+
61
# Posts an analysis comment to a Jira ticket.
#
# ticket_key_or_url - "PROJ-123" or a full Jira URL.
# comment           - String or structured Hash; converted to ADF.
# Returns true on success, false when the API call raised JiraApiError.
def update_ticket(ticket_key_or_url, comment)
  _domain, ticket_key = parse_ticket_input(ticket_key_or_url)
  raise JiraApiError, "Could not extract ticket key from '#{ticket_key_or_url}' for update." unless ticket_key

  puts "Updating Jira ticket #{ticket_key} with analysis comment..."

  # Jira v3 expects the comment body in Atlassian Document Format (ADF).
  payload = { "body" => format_comment_as_adf(comment) }
  make_api_request('POST', "/rest/api/3/issue/#{ticket_key}/comment", payload)

  puts "✅ Successfully added comment to Jira ticket #{ticket_key}"
  true
rescue JiraApiError => e
  puts "❌ Failed to update Jira ticket #{ticket_key}: #{e.message}"
  false
end
85
+
86
+ # Add test connection functionality
87
# Smoke-tests the configured Jira credentials: authentication (/myself),
# project listing, and a trivial permission check on the first project.
# Returns true when all checks pass, false otherwise (any error is
# caught and reported, never raised to the caller).
def test_connection
  puts "🧪 Testing Jira API connection..."

  # Check 1: authentication.
  account = make_api_request('GET', '/myself')
  puts "✅ Authentication successful"
  puts " Account: #{account['displayName']} (#{account['emailAddress']})"

  # Check 2: project visibility.
  projects = make_api_request('GET', '/project')
  puts "✅ Can access #{projects.length} projects"

  # Check 3: basic permission probe on the first visible project.
  puts "✅ Basic permissions verified for project: #{projects.first['key']}" if projects.any?

  puts "🎉 Jira connection test successful!"
  true
rescue => e
  puts "❌ Jira connection test failed: #{e.message}"
  false
end
113
+
114
# Scans a ticket description (plain text, possibly with Jira "hN." heading
# markup) and extracts the lines belonging to requirements-like sections
# (Requirements, Acceptance Criteria, Tasks, ...). When no such section is
# found, the whole description is returned unchanged as a fallback.
#
# description_string - String or nil (nil is treated as "").
# Returns a String.
#
# Fixes vs. previous revision: the "is_another_req_header" branch was dead
# code (requirement-header lines are consumed by the first `if` and never
# reach the section-break check), the first of two chained gsubs was
# subsumed by the second, and an unused list-item regex was removed.
def extract_requirements_from_description(description_string)
  extracted_lines = []
  in_requirements_section = false

  # Headers that open a requirements-style section. Case-insensitive,
  # optionally prefixed by Jira heading markup such as "h2. ".
  requirement_headers_regex = /^(h[1-6]\.\s*)?(Requirements|Acceptance Criteria|Tasks|Key Deliverables|Scope|User Stories|Implementation|Testing|Technical|Additional|Clarification|Comment \d+)/i

  # Heuristic for "some other section header" (used to stop capturing):
  # a short line of up to four words, optionally "hN. "-prefixed,
  # optionally ending with a colon.
  section_break_regex = /^(h[1-6]\.\s*)?\w+(\s+\w+){0,3}:?\s*$/i

  description_string.to_s.each_line do |line| # to_s handles nil input
    stripped_line = line.strip

    if stripped_line.match?(requirement_headers_regex)
      # Keep the header itself for context and start (or continue) capturing.
      in_requirements_section = true
      extracted_lines << stripped_line
      next
    end

    next unless in_requirements_section

    if stripped_line.match?(section_break_regex)
      # Any other header ends the current requirements section.
      # (Requirement headers never reach this point: they are consumed above.)
      in_requirements_section = false
      next
    end

    # Within a section we are inclusive: list items and plain paragraphs
    # under the heading are all considered relevant.
    extracted_lines << stripped_line unless stripped_line.empty?
  end

  if extracted_lines.empty?
    # Fallback: return the entire description if no specific sections found.
    description_string.to_s.strip
  else
    # Collapse runs of blank lines to at most one blank line.
    extracted_lines.join("\n").gsub(/\n{2,}/, "\n\n").strip
  end
end
182
+
183
+ private
184
+
185
# Turns a real Jira API response (issue + comments) into the requirements
# text used downstream: either the extracted requirement sections with a
# short header, or a full human-readable dump as fallback.
#
# ticket_data   - Hash as returned by GET /rest/api/3/issue/KEY.
# comments_data - Array of comment Hashes.
# Returns a String.
#
# Fix: the "--- Full Description ---" block previously interpolated the raw
# description value, which for v3 responses is an ADF Hash and rendered as
# Ruby hash inspect output. The ADF-to-text normalisation now happens first
# and the normalised text is used everywhere.
def process_ticket_data(ticket_data, comments_data)
  comments_section = format_comments_for_requirements(comments_data)

  # Description may be plain text (String) or an ADF document (Hash).
  raw_description = ticket_data.dig('fields', 'description')
  description_content = if raw_description.is_a?(Hash)
                          extract_text_from_adf(raw_description)
                        elsif raw_description.is_a?(String)
                          raw_description
                        else
                          ""
                        end

  full_description_output = <<~FULL_OUTPUT
    Ticket Key: #{ticket_data['key']}
    Summary: #{ticket_data.dig('fields', 'summary')}
    Status: #{ticket_data.dig('fields', 'status', 'name')}
    Assignee: #{ticket_data.dig('fields', 'assignee', 'displayName') || 'Unassigned'}
    Reporter: #{ticket_data.dig('fields', 'reporter', 'displayName')}
    Priority: #{ticket_data.dig('fields', 'priority', 'name')}

    --- Full Description ---
    #{description_content}

    #{comments_section}
  FULL_OUTPUT

  # Requirements are mined from the description and the comments together.
  combined_content = description_content + "\n\n" + comments_section
  extracted_requirements = extract_requirements_from_description(combined_content)

  # extract_requirements_from_description returns the input unchanged when
  # nothing was found; only prepend the ticket header when extraction
  # actually narrowed the content down.
  if extracted_requirements != combined_content && !extracted_requirements.empty?
    "Ticket Key: #{ticket_data['key']}\nSummary: #{ticket_data.dig('fields', 'summary')}\n\n--- Extracted Requirements ---\n#{extracted_requirements}"
  else
    full_description_output
  end
end
228
+
229
# Development/offline fallback used by fetch_ticket when the Jira API call
# fails: builds a hard-coded ticket (description + comments) and runs it
# through the same formatting/extraction pipeline as real API data.
#
# ticket_key - String key used to label the dummy ticket.
# Returns a String: extracted requirements with a header, or the full
# dummy output (clearly marked "(Note: This is dummy data)").
def fetch_dummy_ticket_data(ticket_key)

  # Enhanced dummy description for testing extraction (fallback only)
  dummy_description_content = <<~DUMMY_JIRA_DESCRIPTION
    This is some introductory text about the ticket.

    h2. Overview
    Some general overview of the task.

    h3. Goals
    * Achieve X
    * Implement Y

    h2. Requirements
    Here are the key requirements for this ticket:
    - Must handle user authentication.
    - Should integrate with the payment gateway.
    + Must log all transactions.
    - User interface needs to be responsive.

    Some more text after the first requirements list.

    h2. Acceptance Criteria
    The following criteria must be met:
    * Feature A works as expected.
    * Feature B is tested thoroughly.
    * Sub-item for B.1
    * Sub-item for B.2
    - No critical bugs are present.

    h3. Tasks
    Here's a list of tasks:
    1. Design the database schema. (Note: numbered lists might not be explicitly extracted by simple list parsing but text under "Tasks" is)
    2. Develop the API endpoints.
    - Sub-task 2.1
    3. Write unit tests.

    Additional details and notes.

    h2. Non-Relevant Section
    This section should not be extracted.
    - Item A
    - Item B
  DUMMY_JIRA_DESCRIPTION

  # Simulate fetching comments with implementation details
  dummy_comments = [
    {
      "author" => { "displayName" => "Product Manager" },
      "created" => "2024-01-15T10:30:00.000Z",
      "body" => "Additional clarification: The authentication should support both OAuth2 and API key methods. Please ensure backward compatibility with existing integrations."
    },
    {
      "author" => { "displayName" => "Tech Lead" },
      "created" => "2024-01-16T14:20:00.000Z",
      "body" => "Implementation note: Use the new security library v2.1+ for the authentication module. The payment gateway integration should use the sandbox environment for testing. Database schema changes need migration scripts."
    },
    {
      "author" => { "displayName" => "QA Engineer" },
      "created" => "2024-01-17T09:15:00.000Z",
      "body" => "Testing requirements:\n- Test with mobile devices (iOS/Android)\n- Verify responsive design on tablets\n- Load testing with 1000+ concurrent users\n- Security penetration testing required"
    }
  ]

  # Shaped like a real GET /issue response so the same pipeline applies.
  dummy_data = {
    "key" => ticket_key,
    "fields" => {
      "summary" => "This is a dummy summary for #{ticket_key}",
      "description" => dummy_description_content, # Using the complex description
      "status" => { "name" => "Open" },
      "assignee" => { "displayName" => "Dummy User" },
      "reporter" => { "displayName" => "Another Dummy" },
      "priority" => { "name" => "Medium" }
    }
  }

  # Format comments for inclusion
  comments_section = format_comments_for_requirements(dummy_comments)

  # Construct a more detailed "original" full description string including comments
  full_description_output = <<~FULL_OUTPUT
    Ticket Key: #{dummy_data['key']}
    Summary: #{dummy_data['fields']['summary']}
    Status: #{dummy_data['fields']['status']['name']}
    Assignee: #{dummy_data['fields']['assignee']['displayName']}
    Reporter: #{dummy_data['fields']['reporter']['displayName']}
    Priority: #{dummy_data['fields']['priority']['name']}

    --- Full Description ---
    #{dummy_data['fields']['description']}

    #{comments_section}
    (Note: This is dummy data)
  FULL_OUTPUT

  # Now, extract requirements from both description and comments
  combined_content = dummy_data['fields']['description'] + "\n\n" + comments_section
  extracted_requirements = extract_requirements_from_description(combined_content)

  # If requirements were extracted, prepend ticket key and summary for context.
  # If not, the full description (which includes key, summary etc) is returned by extract_requirements_from_description as fallback.
  if extracted_requirements != combined_content && !extracted_requirements.empty?
    return "Ticket Key: #{dummy_data['key']}\nSummary: #{dummy_data['fields']['summary']}\n\n--- Extracted Requirements ---\n#{extracted_requirements}"
  else
    # Fallback: return the more detailed full output if no specific sections found,
    # or if extracted requirements are empty.
    return full_description_output
  end
end
338
+
339
# Renders a list of Jira comments as a plain-text section used for
# requirements extraction. Returns "" for nil/empty input.
#
# comments - Array of Hashes; "body" may be a String (dummy data) or an
#            ADF Hash (real Jira v3 API).
def format_comments_for_requirements(comments)
  return "" if comments.nil? || comments.empty?

  lines = ["--- Comments with Additional Context ---"]

  comments.each_with_index do |comment, idx|
    author = comment.dig("author", "displayName") || "Unknown"
    created = comment["created"] || "Unknown date"

    # Real API comments arrive as ADF documents; dummy data uses strings.
    body =
      case comment["body"]
      when String then comment["body"]
      when Hash   then extract_text_from_adf(comment["body"])
      else ""
      end

    # Best effort: shorten the ISO timestamp; keep the raw value on failure.
    formatted_date =
      begin
        created == "Unknown date" ? created : Time.parse(created).strftime("%Y-%m-%d %H:%M")
      rescue
        created
      end

    lines << "\nComment #{idx + 1} (#{author}, #{formatted_date}):"
    lines << body.strip
  end

  lines.join("\n")
end
377
+
378
# Flattens an Atlassian Document Format (ADF) document into plain text by
# collecting every "text" node, joined with single spaces. Non-Hash input
# yields "".
def extract_text_from_adf(adf_content)
  return "" unless adf_content.is_a?(Hash)

  collected = []
  extract_text_recursive(adf_content, collected)
  collected.join(" ")
end

# Depth-first walk over ADF nodes, appending each text node's content to
# text_parts. Accepts Hash nodes, Array children, or anything (ignored).
def extract_text_recursive(node, text_parts)
  case node
  when Hash
    if node["type"] == "text" && node["text"]
      text_parts << node["text"]
    elsif node["content"].is_a?(Array)
      node["content"].each { |child| extract_text_recursive(child, text_parts) }
    end
  when Array
    node.each { |child| extract_text_recursive(child, text_parts) }
  end
end
398
+
399
+ private
400
+
401
# Builds the ADF (Atlassian Document Format) document posted as a Jira
# comment.
#
# comment_data - either a plain String (legacy call sites), wrapped as a
#   one-paragraph doc, or a Hash with optional symbol keys
#   :implementation_summary, :technical_summary, :issues, :improvements,
#   :test_coverage, :requirements_evaluation.
# Returns a Hash representing an ADF "doc" node (version 1).
#
# Refactor: the previous revision repeated the same heading/paragraph/
# bullet-list node literals five times over (~250 lines); the node shapes
# are now produced by small private builder helpers with identical output.
def format_comment_as_adf(comment_data)
  # Legacy path: plain string comment.
  return simple_adf_doc(comment_data) if comment_data.is_a?(String)

  content = []

  # Report header plus generation timestamp.
  content << adf_heading(2, "🤖 N2B Code Analysis Report")
  timestamp = Time.now.strftime("%Y-%m-%d %H:%M UTC")
  content << adf_paragraph("Generated on #{timestamp}", [{ "type" => "em" }])

  # Implementation summary is shown prominently above the fold.
  impl_summary = comment_data[:implementation_summary]
  if impl_summary && !impl_summary.empty?
    content << adf_heading(3, "✅ Implementation Summary")
    content << adf_paragraph(impl_summary)
    content << { "type" => "rule" } # Horizontal line
  else
    # Fallback if no implementation summary.
    content << adf_paragraph("📝 Code analysis completed. See detailed findings below.", [{ "type" => "em" }])
  end

  # Everything else goes into a collapsible "expand" section.
  detail_nodes = build_detail_sections(comment_data)
  if detail_nodes.any?
    content << {
      "type" => "expand",
      "attrs" => { "title" => build_expand_title(comment_data) },
      "content" => detail_nodes
    }
  end

  { "type" => "doc", "version" => 1, "content" => content }
end

# Wraps plain text in a minimal one-paragraph ADF document (legacy input).
def simple_adf_doc(text)
  {
    "type" => "doc",
    "version" => 1,
    "content" => [adf_paragraph(text)]
  }
end

# ADF text node; marks (e.g. strong/em) are attached only when given.
def adf_text_node(text, marks = nil)
  node = { "type" => "text", "text" => text }
  node["marks"] = marks if marks
  node
end

# Single-paragraph node wrapping one text node.
def adf_paragraph(text, marks = nil)
  { "type" => "paragraph", "content" => [adf_text_node(text, marks)] }
end

# Bold heading node at the given level.
def adf_heading(level, text)
  {
    "type" => "heading",
    "attrs" => { "level" => level },
    "content" => [adf_text_node(text, [{ "type" => "strong" }])]
  }
end

# Bullet list where each entry is a one-paragraph list item.
def adf_bullet_list(texts)
  {
    "type" => "bulletList",
    "content" => texts.map { |t| { "type" => "listItem", "content" => [adf_paragraph(t)] } }
  }
end

# Assembles the heading/body nodes for the collapsible detail section,
# in the same order as before: technical summary, issues, improvements,
# test coverage, requirements evaluation. Sections with blank/absent data
# are skipped.
def build_detail_sections(comment_data)
  nodes = []
  if comment_data[:technical_summary] && !comment_data[:technical_summary].empty?
    nodes << adf_heading(4, "🔧 Technical Changes")
    nodes << adf_paragraph(comment_data[:technical_summary])
  end
  if comment_data[:issues] && comment_data[:issues].any?
    nodes << adf_heading(4, "⚠️ Potential Issues")
    nodes << adf_bullet_list(comment_data[:issues])
  end
  if comment_data[:improvements] && comment_data[:improvements].any?
    nodes << adf_heading(4, "💡 Suggested Improvements")
    nodes << adf_bullet_list(comment_data[:improvements])
  end
  if comment_data[:test_coverage] && !comment_data[:test_coverage].empty?
    nodes << adf_heading(4, "🧪 Test Coverage")
    nodes << adf_paragraph(comment_data[:test_coverage])
  end
  if comment_data[:requirements_evaluation] && !comment_data[:requirements_evaluation].empty?
    nodes << adf_heading(4, "📋 Requirements Evaluation")
    nodes << adf_paragraph(comment_data[:requirements_evaluation])
  end
  nodes
end

# Informative title for the expand node, listing which sections it holds.
def build_expand_title(comment_data)
  sections = []
  sections << "Technical Changes" if comment_data[:technical_summary] && !comment_data[:technical_summary].empty?
  sections << "#{comment_data[:issues]&.length || 0} Issues" if comment_data[:issues]&.any?
  sections << "#{comment_data[:improvements]&.length || 0} Improvements" if comment_data[:improvements]&.any?
  sections << "Test Coverage" if comment_data[:test_coverage] && !comment_data[:test_coverage].empty?
  sections << "Requirements" if comment_data[:requirements_evaluation] && !comment_data[:requirements_evaluation].empty?

  sections.any? ? "🔍 Detailed Analysis: #{sections.join(', ')} (Click to expand)" : "🔍 Detailed Analysis (Click to expand)"
end
665
+
666
# Resolves user input into [domain, ticket_key].
#
# ticket_key_or_url - a bare key ("PROJ-123", case-insensitive) or a Jira
#   URL (".../browse/PROJ-123" or one carrying the key in a query param:
#   selectedIssue / issueKey / issue).
# Returns [domain_or_nil, upcased_key]; domain is nil for a bare key,
# signalling that the configured default domain applies.
# Raises JiraApiError when no key can be extracted or the URL is invalid.
#
# Fix: key validation previously used ^...$ anchors, which match per LINE
# in Ruby, so multi-line input with an embedded key would validate. Now
# anchored with \A...\z to cover the whole string.
def parse_ticket_input(ticket_key_or_url)
  ticket_key_pattern = /\A[A-Z0-9]+-[0-9]+\z/i

  if ticket_key_or_url =~ /\Ahttps?:\/\//
    uri = URI.parse(ticket_key_or_url)

    # Standard Jira path: /browse/TICKET-KEY (query params may follow).
    match = uri.path.match(/\/browse\/([A-Z0-9]+-[0-9]+)/i)
    if match && match[1]
      return ["#{uri.scheme}://#{uri.host}", match[1].upcase]
    end

    # Board/backlog URLs carry the key in a query parameter instead.
    if uri.query
      query_params = URI.decode_www_form(uri.query).to_h
      key_from_query = query_params['selectedIssue'] || query_params['issueKey'] || query_params['issue']
      if key_from_query && key_from_query.match?(ticket_key_pattern)
        return ["#{uri.scheme}://#{uri.host}", key_from_query.upcase]
      end
    end

    raise JiraApiError, "Could not parse Jira ticket key from URL: #{ticket_key_or_url}. Expected format like '.../browse/PROJECT-123'."
  elsif ticket_key_or_url.match?(ticket_key_pattern)
    # Bare ticket key: nil domain means "use the configured default".
    [nil, ticket_key_or_url.upcase]
  else
    raise JiraApiError, "Invalid Jira ticket key format: '#{ticket_key_or_url}'. Expected 'PROJECT-123' or a valid Jira URL."
  end
rescue URI::InvalidURIError
  raise JiraApiError, "Invalid URL format: #{ticket_key_or_url}"
end
700
+
701
# Executes an authenticated Jira REST call and returns the parsed JSON
# body ({} when the response body is empty).
#
# method - 'GET' or 'POST' (anything else raises JiraApiError).
# path   - absolute API path ("/rest/api/3/...") or a path relative to
#          @base_url ("issue/KEY-1" / "/myself").
# body   - optional Hash, JSON-encoded for POST.
# Raises JiraApiError on non-2xx responses and on network-level failures.
def make_api_request(method, path, body = nil)
  full_url =
    if path.start_with?('/rest/api/3/')
      # Absolute API path: attach it to the bare domain portion of @base_url.
      "#{@base_url.gsub(/\/rest\/api\/3.*$/, '')}#{path}"
    else
      # Relative path: attach under the API base.
      "#{@base_url.chomp('/')}/#{path.sub(/^\//, '')}"
    end

  uri = URI.parse(full_url)

  http = Net::HTTP.new(uri.host, uri.port)
  http.use_ssl = (uri.scheme == 'https')
  http.read_timeout = 30
  http.open_timeout = 10

  case method.upcase
  when 'GET'
    request = Net::HTTP::Get.new(uri.request_uri)
  when 'POST'
    request = Net::HTTP::Post.new(uri.request_uri)
    request.body = body.to_json if body
  # Add other methods (PUT, DELETE) as needed
  else
    raise JiraApiError, "Unsupported HTTP method: #{method}"
  end

  credentials = Base64.strict_encode64("#{@jira_config['email']}:#{@jira_config['api_key']}")
  request['Authorization'] = "Basic #{credentials}"
  request['Content-Type'] = 'application/json'
  request['Accept'] = 'application/json'

  response = http.request(request)

  unless response.is_a?(Net::HTTPSuccess)
    error_message = "Jira API Error: #{response.code} #{response.message}"
    error_message += " - #{response.body}" if response.body && !response.body.empty?
    raise JiraApiError, error_message
  end

  response.body.empty? ? {} : JSON.parse(response.body)
rescue Timeout::Error, Errno::EINVAL, Errno::ECONNRESET, EOFError,
       Net::HTTPBadResponse, Net::HTTPHeaderSyntaxError, Net::ProtocolError, Errno::ECONNREFUSED => e
  raise JiraApiError, "Jira API request failed: #{e.class} - #{e.message}"
end
752
+ end
753
+ end