teems 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +83 -39
- data/lib/teems/api/client.rb +1 -2
- data/lib/teems/api/meetings.rb +25 -0
- data/lib/teems/cli.rb +5 -1
- data/lib/teems/commands/base.rb +15 -2
- data/lib/teems/commands/help.rb +1 -0
- data/lib/teems/commands/meeting.rb +471 -0
- data/lib/teems/commands/meeting_recording.rb +296 -0
- data/lib/teems/commands/meeting_transcript.rb +241 -0
- data/lib/teems/runner.rb +6 -0
- data/lib/teems/services/api_client.rb +3 -3
- data/lib/teems/services/safari_js_runner.rb +89 -0
- data/lib/teems/version.rb +1 -1
- data/lib/teems.rb +3 -0
- metadata +6 -1
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Teems
|
|
4
|
+
module Commands
|
|
5
|
+
# Parses DASH MPD manifest XML to extract video/audio segment info.
#
# Uses lightweight regex scanning rather than a full XML parser; this is
# sufficient for the narrow manifest shape produced by SharePoint's
# videomanifest endpoint.
class DashManifestParser
  # Describes a media track (video or audio) with its segment template info.
  Track = Data.define(:type, :init_url, :media_template, :segment_count, :timescale, :segments)

  # A single media segment with start time and duration in timescale units.
  Segment = Data.define(:start, :duration)

  ADAPTATION_RE = %r{<AdaptationSet[^>]*contentType="(video|audio)"(.*?)</AdaptationSet>}m
  TEMPLATE_RE = /<SegmentTemplate[^>]*/
  TIMELINE_RE = %r{<SegmentTimeline>(.*?)</SegmentTimeline>}m
  SEGMENT_RE = /<S\s[^>]*>/
  REP_RE = /<Representation[^>]*id="([^"]+)"/

  # @param mpd_xml [String] raw MPD manifest XML
  def initialize(mpd_xml)
    @xml = mpd_xml
    @attrs = nil
    @rep_id = nil
  end

  # @return [Array<Track>] one Track per adaptation set that has a
  #   SegmentTemplate and a non-empty SegmentTimeline
  def parse
    tracks = []
    @xml.scan(ADAPTATION_RE) do |type, body|
      track = parse_adaptation(type, body)
      tracks << track if track
    end
    tracks
  end

  private

  def parse_adaptation(type, body)
    template_match = body.match(TEMPLATE_RE)
    return nil unless template_match

    @attrs = template_match[0]
    @rep_id = body.match(REP_RE)&.then { _1[1] } || ''
    segments = parse_timeline(body)
    return nil if segments.empty?

    build_track(type, segments)
  end

  def build_track(type, segments)
    timescale_val = extract_attr(@attrs, 'timescale').to_i
    Track.new(type: type,
              init_url: decode_template(extract_attr(@attrs, 'initialization')),
              media_template: decode_template(extract_attr(@attrs, 'media')),
              segment_count: segments.length,
              # Guard against a missing/zero timescale attribute.
              timescale: timescale_val.positive? ? timescale_val : 1,
              segments: segments)
  end

  # Decodes XML-escaped ampersands and substitutes the representation id.
  def decode_template(url)
    return '' unless url

    # BUGFIX: was gsub('&', '&') — a no-op. Template URLs are XML attribute
    # values, so '&' arrives encoded as '&amp;' and must be decoded before
    # the URL is usable.
    url.gsub('&amp;', '&').gsub('$RepresentationID$', @rep_id.to_s)
  end

  def parse_timeline(body)
    timeline_match = body.match(TIMELINE_RE)
    return [] unless timeline_match

    segments = []
    time = 0
    timeline_match[1].scan(SEGMENT_RE) do |seg_tag|
      time = parse_segment_tag(seg_tag, time, segments)
    end
    segments
  end

  # Expands one <S t d r> tag into Segments; returns the running timeline
  # position after the expansion.
  def parse_segment_tag(seg_tag, time, segments)
    start_time = extract_attr(seg_tag, 't').to_i
    duration = extract_attr(seg_tag, 'd').to_i
    repeat_count = extract_attr(seg_tag, 'r').to_i

    # An explicit positive 't' resets the running clock (gaps in timeline).
    time = start_time if start_time.positive?
    (repeat_count + 1).times do
      segments << Segment.new(start: time, duration: duration)
      time += duration
    end
    time
  end

  # Extracts a double-quoted attribute value from a tag string, or nil.
  # BUGFIX: anchor with (?<!\w) and Regexp.escape so a short name like 't'
  # cannot match the tail of another attribute (e.g. 'presentationTimeOffset'),
  # and regex metacharacters in +name+ are taken literally.
  def extract_attr(tag, name)
    match = tag.match(/(?<!\w)#{Regexp.escape(name)}="([^"]*)"/)
    match ? match[1] : nil
  end
end
|
|
94
|
+
|
|
95
|
+
# Downloads DASH segments in parallel and assembles track data.
module SegmentDownloader
  PARALLEL_DOWNLOADS = 5

  private

  # Fetches the init segment plus every media segment for a track and
  # concatenates them, in timeline order, into one binary string.
  def download_track_segments(track, base_url)
    segment_urls = track.segments.map { |seg| resolve_url(base_url, segment_path(track, seg)) }
    assembled = String.new(fetch_segment(resolve_url(base_url, track.init_url)))
    parallel_fetch(segment_urls, track.segment_count).each { |chunk| assembled << chunk }
    assembled
  end

  # Expands the $Time$ placeholder with the segment's start timestamp.
  def segment_path(track, seg)
    track.media_template.gsub('$Time$', seg.start.to_s)
  end

  # Downloads all URLs with a small worker pool; results preserve input order.
  def parallel_fetch(urls, total)
    init_seg_state(urls.length, total)
    urls.each_with_index { |url, idx| @seg_state[:queue] << [url, idx] }
    workers = Array.new(PARALLEL_DOWNLOADS) { Thread.new { fetch_from_queue } }
    workers.each(&:join)
    check_seg_errors
    @seg_state[:results]
  end

  def init_seg_state(count, total)
    @seg_state = { results: Array.new(count), errors: [],
                   queue: Queue.new, total: total, mutex: Mutex.new }
  end

  # Re-raises the first error captured by any worker thread.
  def check_seg_errors
    failure = @seg_state[:errors].first
    raise failure if failure
  end

  # Worker loop: pop (url, index) pairs until the queue drains or an error
  # occurs. Errors are recorded and stop this worker only.
  def fetch_from_queue
    loop do
      url, index = @seg_state[:queue].pop(true)
      @seg_state[:results][index] = fetch_segment(url)
      record_progress
    rescue ThreadError
      # Non-blocking pop raises ThreadError once the queue is empty.
      break
    rescue StandardError => e
      @seg_state[:mutex].synchronize { @seg_state[:errors] << e }
      break
    end
  end

  def record_progress
    @seg_state[:mutex].synchronize do
      print "\r Downloading: #{@seg_state[:results].count { _1 }}/#{@seg_state[:total]} segments"
    end
  end

  # Absolute paths pass through; relative paths are joined onto the
  # manifest's directory.
  def resolve_url(base_url, path)
    return path if path.start_with?('http')

    "#{base_url.sub(%r{/[^/]*$}, '/')}#{path}"
  end

  def fetch_segment(url)
    response = Net::HTTP.get_response(URI(url))
    raise Teems::Error, "Segment download failed (#{response.code})" unless response.is_a?(Net::HTTPSuccess)

    response.body
  end
end
|
|
161
|
+
|
|
162
|
+
# Resolves recording file info by fetching embed page HTML directly.
module RecordingResolver
  include EmbedPageParser

  private

  # Resolves a sharing link to { transform_url:, name: } for the recording.
  # Reports an error and returns nil when any step fails.
  def resolve_recording_file_info(sharing_url)
    embed_url = fetch_embed_url(sharing_url)
    unless embed_url
      error('Could not get embed URL for recording')
      return nil
    end

    file_info = fetch_and_parse_embed(embed_url)
    unless file_info&.dig(:transform_url)
      error('Could not extract file info from embed page')
      return nil
    end

    { transform_url: file_info[:transform_url], name: file_info[:name] || 'recording.mp4' }
  end
end
|
|
178
|
+
|
|
179
|
+
# Downloads meeting recordings via DASH streaming from SharePoint.
#
# Pipeline: sharing link -> embed page -> DASH manifest -> parallel segment
# download -> ffmpeg merge (plus optional subtitle embedding).
module MeetingRecording
  include RecordingResolver
  include SegmentDownloader

  MANIFEST_PARAMS = 'format=dash&part=index'

  private

  # Entry point: locates a sharing URL and runs the download pipeline.
  # Reports an error when no recording or no ffmpeg is available.
  def download_recording(target, classified)
    sharing_url = target[:fileUrl] || classified[:recordings].filter_map { |rec| rec[:url] }.first
    return error('No recordings found for this meeting') unless sharing_url
    return error('ffmpeg is required. Install with: brew install ffmpeg') unless ffmpeg?

    execute_recording_pipeline(sharing_url)
  end

  def execute_recording_pipeline(sharing_url)
    info('Fetching recording via SharePoint...')
    file_info = resolve_recording_file_info(sharing_url)
    return 1 unless file_info

    manifest = fetch_dash_manifest(file_info[:transform_url])
    return error('Could not fetch DASH manifest') unless manifest

    download_and_assemble(manifest)
  end

  def fetch_dash_manifest(transform_url)
    url = build_manifest_url(transform_url)
    debug("Fetching DASH manifest: #{url}")
    fetch_manifest_content(url)
  end

  # Rewrites the thumbnail transform URL into a videomanifest URL.
  def build_manifest_url(transform_url)
    url = transform_url.sub('/thumbnail', '/videomanifest')
    separator = url.include?('?') ? '&' : '?'
    "#{url}#{separator}#{MANIFEST_PARAMS}"
  end

  # Returns the manifest body, or nil on HTTP failure / network error.
  def fetch_manifest_content(url)
    result = Net::HTTP.get_response(URI(url))
    return result.body if result.is_a?(Net::HTTPSuccess)

    debug("Manifest download failed: HTTP #{result.code}")
    nil
  rescue IOError, SystemCallError, SocketError, Timeout::Error, OpenSSL::SSL::SSLError => e
    debug("Manifest download error: #{e.message}")
    nil
  end

  def download_and_assemble(manifest_xml)
    tracks = DashManifestParser.new(manifest_xml).parse
    selected = select_tracks(tracks)
    return error('No video/audio tracks found in manifest') unless selected

    assemble_recording(selected, manifest_xml)
  end

  # Picks the first video and first audio track; both are required.
  def select_tracks(tracks)
    grouped = tracks.group_by(&:type)
    video = grouped['video']&.first
    audio = grouped['audio']&.first
    video && audio ? { video: video, audio: audio } : nil
  end

  def assemble_recording(selected, manifest_xml)
    @rec_dir = @options[:output_dir] || '.'
    FileUtils.mkdir_p(@rec_dir)
    @rec_base_url = manifest_xml.match(%r{<BaseURL>([^<]+)</BaseURL>})&.then { _1[1] } || ''

    video_path = write_track(selected[:video], 'video')
    audio_path = write_track(selected[:audio], 'audio')
    merge_and_finalize(video_path, audio_path, @rec_dir)
  end

  # Downloads one track into a hidden temp file; returns its path.
  def write_track(track, label)
    info("Downloading #{label} track (#{track.segment_count} segments)...")
    data = download_track_segments(track, @rec_base_url)
    path = File.join(@rec_dir, ".teems_#{label}.mp4")
    File.binwrite(path, data)
    puts # terminate the \r progress line
    path
  end

  def merge_and_finalize(video_path, audio_path, dir)
    output_path = File.join(dir, 'recording.mp4')
    info('Merging video and audio tracks...')
    merged = run_ffmpeg('-i', video_path, '-i', audio_path, '-c', 'copy', output_path)
    FileUtils.rm_f([video_path, audio_path]) # temp tracks removed even on failure

    return error('ffmpeg merge failed') unless merged

    embed_subtitle_if_available(output_path)
    success("Recording saved to #{output_path}")
  end

  # If a .vtt transcript sits next to the recording, remux it in as a
  # mov_text subtitle track (in-place via a temp file).
  def embed_subtitle_if_available(video_path)
    vtt_path = Dir.glob(File.join(File.dirname(video_path), '*.vtt')).first
    return unless vtt_path

    info('Embedding transcript as subtitle track...')
    # BUGFIX: anchor the replacement at the end of the string. A bare
    # sub('.mp4', ...) rewrites the FIRST '.mp4' occurrence, corrupting
    # paths whose directory name happens to contain '.mp4'.
    final_path = video_path.sub(/\.mp4\z/, '_subs.mp4')
    return unless run_ffmpeg('-i', video_path, '-i', vtt_path, '-c', 'copy', '-c:s', 'mov_text', final_path)

    FileUtils.mv(final_path, video_path)
  end

  # Runs ffmpeg quietly; returns true on success (system's semantics).
  def run_ffmpeg(*)
    system('ffmpeg', '-y', *, out: File::NULL, err: File::NULL)
  end

  # NOTE(review): `which` is POSIX-only — fine for this macOS/Homebrew tool.
  def ffmpeg?
    system('which', 'ffmpeg', out: File::NULL, err: File::NULL)
  end
end
|
|
295
|
+
end
|
|
296
|
+
end
|
|
@@ -0,0 +1,241 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Teems
|
|
4
|
+
module Commands
|
|
5
|
+
# Parses SharePoint embed page HTML to extract file metadata.
module EmbedPageParser
  SP_ITEM_RE = %r{(https://[^/]+(?::\d+)?/.+?)/_api/v2\.\d+/drives/([^/]+)/items/([^/?]+)}
  FILE_INFO_RE = /g_fileInfo\s*=\s*(\{.*?\});/m

  private

  # Resolves a sharing link to its embed page URL via the share-preview API;
  # nil on API failure.
  def fetch_embed_url(sharing_url)
    debug("Resolving sharing link: #{sharing_url}")
    preview = with_token_refresh { runner.meetings_api.share_preview(sharing_url) }
    preview['getUrl']
  rescue ApiError => e
    debug("Share preview failed: #{e.message}")
    nil
  end

  def fetch_and_parse_embed(embed_url)
    debug("Fetching embed page: #{embed_url}")
    html = fetch_embed_page(embed_url)
    html && parse_embed_file_info(html)
  end

  # Fetches the embed page, following up to +limit+ redirects; nil on failure.
  def fetch_embed_page(url, limit: 3)
    response = Net::HTTP.get_response(URI(url))
    case response
    when Net::HTTPSuccess
      response.body
    when Net::HTTPRedirection
      if limit.positive?
        fetch_embed_page(URI.join(url, response['location']).to_s, limit: limit - 1)
      else
        debug('Too many redirects fetching embed page')
      end
    else
      debug("Embed page failed: HTTP #{response.code}")
    end
  rescue IOError, SystemCallError, SocketError, Timeout::Error, URI::InvalidURIError, OpenSSL::SSL::SSLError => e
    debug("Embed page fetch error: #{e.message}")
    nil
  end

  # Locates the inline g_fileInfo JSON blob and parses it; nil when absent
  # or malformed.
  def parse_embed_file_info(html)
    blob = html.match(FILE_INFO_RE)
    if blob.nil?
      debug('g_fileInfo not found in embed page HTML')
      return nil
    end

    build_file_info(JSON.parse(blob[1]))
  rescue JSON::ParserError => e
    debug("Embed page JSON parse error: #{e.message}")
    nil
  end

  def build_file_info(data)
    extract_ids(data)&.merge(extract_extras(data))
  end

  def extract_extras(data)
    { drive_token: data['.driveAccessTokenV21'],
      transform_url: data['.transformUrl'] || data['transformUrl'],
      name: data['name'] }
  end

  # Prefer the canonical .spItemUrl; fall back to loose top-level fields.
  def extract_ids(data)
    build_from_sp_item_url(data['.spItemUrl']) || build_from_fields(data)
  end

  def build_from_sp_item_url(sp_url)
    return nil unless sp_url

    parts = sp_url.match(SP_ITEM_RE)
    parts && { site_url: parts[1], drive_id: parts[2], item_id: parts[3] }
  end

  def build_from_fields(data)
    # NOTE(review): drive_id falling back to libraryId.siteId looks odd (a
    # site id used as a drive id) — confirm against real embed payloads.
    drive_id = data.dig('libraryId', 'siteId') || data['driveId']
    item_id = data['itemId'] || data['id']
    site_url = data['siteUrl'] || data.dig('libraryId', 'siteUrl')
    return nil unless [drive_id, item_id, site_url].all?

    { drive_id: drive_id, item_id: item_id, site_url: site_url }
  end
end
|
|
84
|
+
|
|
85
|
+
# Converts JSON transcript entries to WebVTT with speaker names.
class TranscriptFormatter
  def initialize(entries)
    @entries = entries
    @cue = nil
  end

  # Renders all entries as a WebVTT document with <v speaker> voice tags.
  def to_vtt
    body = @entries.map.with_index(1) { |entry, number| render_cue(entry, number) }.join
    "WEBVTT\n\n#{body}"
  end

  private

  # One numbered cue block: index line, timing line, voiced text, blank line.
  def render_cue(entry, number)
    "#{number}\n#{timestamps_for(entry)}\n<v #{entry['speakerDisplayName']}>#{entry['text']}</v>\n\n"
  end

  def timestamps_for(entry)
    "#{clip_offset(entry['startOffset'])} --> #{clip_offset(entry['endOffset'])}"
  end

  # Trims sub-millisecond precision: 'HH:MM:SS.fffffff' -> 'HH:MM:SS.fff';
  # a missing offset becomes zero.
  def clip_offset(offset)
    offset ? offset.to_s[0, 12] : '00:00:00.000'
  end
end
|
|
112
|
+
|
|
113
|
+
# Downloads meeting transcripts via SharePoint API (no Safari required).
module MeetingTranscript
  include EmbedPageParser

  private

  def download_transcript(target, classified)
    sharing_url = target[:fileUrl] || first_recording_url(classified)
    return error('No recording sharing link found for transcript download') unless sharing_url

    execute_transcript_pipeline(sharing_url)
  end

  def first_recording_url(classified)
    classified[:recordings].filter_map { |rec| rec[:url] }.first
  end

  # Pipeline: sharing link -> embed page -> transcript list -> saved VTT.
  def execute_transcript_pipeline(sharing_url)
    info('Fetching transcript via SharePoint...')
    embed_url = fetch_embed_url(sharing_url)
    return error('Could not get embed URL from sharing link') unless embed_url

    @transcript_info = fetch_and_parse_embed(embed_url)
    return error('Could not extract file info from embed page') unless @transcript_info

    transcript = resolve_transcript
    return error('No transcripts found for this recording') unless transcript

    save_transcript(transcript)
  end

  def resolve_transcript
    endpoint = build_transcripts_url
    debug("Fetching transcripts from: #{endpoint}")
    fetch_transcript_list(endpoint)
  end

  def build_transcripts_url
    "#{@transcript_info[:site_url]}/_api/v2.1" \
      "/drives/#{@transcript_info[:drive_id]}" \
      "/items/#{@transcript_info[:item_id]}/media/transcripts"
  end

  def fetch_transcript_list(url)
    body = fetch_with_drive_token(url)
    body && parse_transcript_response(body)
  rescue JSON::ParserError => e
    debug("Transcript list JSON parse error: #{e.message}")
    nil
  end

  # GET with the drive access token (when present); nil on any failure.
  def fetch_with_drive_token(url)
    response = Net::HTTP.get_response(URI(url), drive_token_headers)
    return response.body if response.is_a?(Net::HTTPSuccess)

    debug("Transcript list request failed: HTTP #{response.code}")
    nil
  rescue IOError, SystemCallError, SocketError, Timeout::Error, OpenSSL::SSL::SSLError => e
    debug("Transcript list fetch error: #{e.message}")
    nil
  end

  def drive_token_headers
    token = @transcript_info[:drive_token]
    debug('No drive token; request will be unauthenticated') unless token
    headers = { 'Accept' => 'application/json' }
    token ? headers.merge('Authorization' => "Bearer #{token}") : headers
  end

  def parse_transcript_response(body)
    parsed = JSON.parse(body)
    sp_error = parsed['error']
    return debug("SharePoint API error: #{sp_error}") if sp_error

    extract_transcript_entry(parsed)
  end

  # Picks the first transcript entry and its download URL; nil when none.
  def extract_transcript_entry(parsed)
    entry = (parsed['value'] || [parsed]).first
    download_url = entry&.dig('temporaryDownloadUrl') || entry&.dig('downloadUrl')
    return nil unless download_url

    { url: download_url, name: File.basename(entry['name'] || 'transcript.vtt') }
  end

  def save_transcript(transcript)
    dir = @options[:output_dir] || '.'
    FileUtils.mkdir_p(dir)
    path = File.join(dir, File.basename(transcript[:name]))

    info("Downloading transcript to #{path}...")
    vtt = fetch_and_convert_transcript(transcript[:url])
    return error('Failed to download transcript content') unless vtt

    File.write(path, vtt)
    success("Transcript saved to #{path}")
  rescue SystemCallError => e
    error("Could not save transcript: #{e.message}")
  end

  # Prefers the JSON representation (for speaker names); falls back to the
  # raw VTT download when JSON is unavailable.
  def fetch_and_convert_transcript(url)
    json_url = url.include?('?') ? "#{url}&format=json" : "#{url}?format=json"
    parsed = parse_transcript_json(fetch_transcript_content(json_url))
    return TranscriptFormatter.new(parsed['entries']).to_vtt if parsed

    fetch_transcript_content(url)
  end

  def parse_transcript_json(raw)
    return nil unless raw

    data = JSON.parse(raw)
    data['entries'] ? data : nil
  rescue JSON::ParserError
    nil
  end

  def fetch_transcript_content(url)
    response = Net::HTTP.get_response(URI(url))
    return response.body if response.is_a?(Net::HTTPSuccess)

    debug("Transcript download failed: HTTP #{response.code}")
    nil
  rescue IOError, SystemCallError, SocketError, Timeout::Error, OpenSSL::SSL::SSLError => e
    debug("Transcript download error: #{e.message}")
    nil
  end
end
|
|
240
|
+
end
|
|
241
|
+
end
|
data/lib/teems/runner.rb
CHANGED
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
module Teems
|
|
4
4
|
# API factory methods for Runner, extracted to keep method count manageable
|
|
5
5
|
module ApiFactories
|
|
6
|
+
def meetings_api = Api::Meetings.new(api_client, account)
|
|
6
7
|
def channels_api = Api::Channels.new(api_client, account)
|
|
7
8
|
def chats_api = Api::Chats.new(api_client, account)
|
|
8
9
|
def messages_api = Api::Messages.new(api_client, account)
|
|
@@ -56,6 +57,11 @@ module Teems
|
|
|
56
57
|
Formatters::MessageFormatter.new(output: @output, cache_store: cache_store)
|
|
57
58
|
end
|
|
58
59
|
|
|
60
|
+
# Safari JS runner for SharePoint automation
|
|
61
|
+
def safari_js_runner
|
|
62
|
+
Services::SafariJsRunner.new(output: @output)
|
|
63
|
+
end
|
|
64
|
+
|
|
59
65
|
# Token extractor for Safari automation
|
|
60
66
|
def token_extractor(auth_mode: :default)
|
|
61
67
|
Services::TokenExtractor.new(output: @output, auth_mode: auth_mode)
|
|
@@ -162,9 +162,9 @@ module Teems
|
|
|
162
162
|
run_request(path, get_http_for_endpoint(endpoint_key)) { |http| http.request(req) }
|
|
163
163
|
end
|
|
164
164
|
|
|
165
|
-
def delete(
|
|
166
|
-
uri = URI(
|
|
167
|
-
run_request(
|
|
165
|
+
def delete(endpoint_key, path, account:)
|
|
166
|
+
uri = URI("#{resolve_endpoint(endpoint_key)}#{path}")
|
|
167
|
+
run_request(path, get_http_for_endpoint(endpoint_key)) do |http|
|
|
168
168
|
req = Net::HTTP::Delete.new(uri)
|
|
169
169
|
apply_auth(req, account, endpoint_key)
|
|
170
170
|
http.request(req)
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'open3'
|
|
4
|
+
|
|
5
|
+
module Teems
|
|
6
|
+
module Services
|
|
7
|
+
# Runs JavaScript in Safari's current tab via AppleScript.
class SafariJsRunner
  def initialize(output: nil)
    @output = output
  end

  # True when the `osascript` binary is on PATH.
  def available? = system('which', 'osascript', out: File::NULL, err: File::NULL)

  # Evaluates +js_code+ in the front Safari tab; returns its string result,
  # or nil when osascript fails.
  def execute_js(js_code)
    run_applescript(safari_js_script(escape_js(js_code)))
  end

  def navigate(url)
    run_applescript(navigate_script(url))
  end

  # Polls document.readyState once per second until 'complete'.
  # @raise [Teems::Error] after +timeout+ seconds without a loaded page
  def wait_for_load(timeout: 15)
    timeout.times do
      sleep 1
      return if execute_js('document.readyState') == 'complete'
    end
    raise Teems::Error, 'Timed out waiting for page to load'
  end

  def page_url
    run_applescript(page_url_script)
  end

  private

  # Escapes JS so it survives embedding inside a double-quoted
  # AppleScript string literal.
  def escape_js(js_code)
    js_code.gsub('\\', '\\\\\\\\').gsub('"', '\\"').gsub("\n", '\\n')
  end

  def safari_js_script(escaped_js)
    <<~APPLESCRIPT
      tell application "Safari"
      if (count of windows) > 0 then
      return do JavaScript "#{escaped_js}" in current tab of front window
      end if
      return ""
      end tell
    APPLESCRIPT
  end

  def navigate_script(url)
    safe = escape_for_applescript(url)
    "tell application \"Safari\"\nactivate\n" \
      "if (count of windows) = 0 then\nmake new document with properties {URL:\"#{safe}\"}\n" \
      "else\nset URL of current tab of front window to \"#{safe}\"\n" \
      "end if\nend tell\n"
  end

  def escape_for_applescript(str)
    str.gsub('\\', '\\\\\\\\').gsub('"', '\\"')
  end

  def page_url_script
    <<~APPLESCRIPT
      tell application "Safari"
      if (count of windows) > 0 then
      return URL of current tab of front window
      end if
      return ""
      end tell
    APPLESCRIPT
  end

  # Runs a script through osascript; nil on non-zero exit or spawn failure.
  def run_applescript(script)
    stdout, _stderr, status = Open3.capture3('osascript', '-e', script)
    status.success? ? stdout.strip : nil
  rescue IOError, SystemCallError => e
    log("AppleScript execution failed: #{e.message}")
    nil
  end

  def log(message) = @output&.debug(message)
end
|
|
88
|
+
end
|
|
89
|
+
end
|
data/lib/teems/version.rb
CHANGED
data/lib/teems.rb
CHANGED
|
@@ -67,6 +67,7 @@ module Teems
|
|
|
67
67
|
autoload :TeamsUrlParser, 'teems/services/teams_url_parser'
|
|
68
68
|
autoload :SyncStore, 'teems/services/sync_store'
|
|
69
69
|
autoload :SyncDirNaming, 'teems/services/sync_dir_naming'
|
|
70
|
+
autoload :SafariJsRunner, 'teems/services/safari_js_runner'
|
|
70
71
|
autoload :SyncEngine, 'teems/services/sync_engine'
|
|
71
72
|
end
|
|
72
73
|
|
|
@@ -93,6 +94,7 @@ module Teems
|
|
|
93
94
|
autoload :Who, 'teems/commands/who'
|
|
94
95
|
autoload :Ooo, 'teems/commands/ooo'
|
|
95
96
|
autoload :Org, 'teems/commands/org'
|
|
97
|
+
autoload :Meeting, 'teems/commands/meeting'
|
|
96
98
|
autoload :Status, 'teems/commands/status'
|
|
97
99
|
end
|
|
98
100
|
|
|
@@ -103,6 +105,7 @@ module Teems
|
|
|
103
105
|
autoload :Channels, 'teems/api/channels'
|
|
104
106
|
autoload :Chats, 'teems/api/chats'
|
|
105
107
|
autoload :Files, 'teems/api/files'
|
|
108
|
+
autoload :Meetings, 'teems/api/meetings'
|
|
106
109
|
autoload :Messages, 'teems/api/messages'
|
|
107
110
|
autoload :Users, 'teems/api/users'
|
|
108
111
|
end
|