Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-12-22 16:06:49 +00:00)

Compare commits: 7e6845d0d6...92d29b22fc (2 commits)

Commits in this range: f70619bfe5, 92d29b22fc
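Taken together, the changes in this compare do two things: YouTube clip URLs now hand a '_clip_args' start/end window from the extractor to the downloader layer (forcing FFmpegFD, which turns the window into ffmpeg -ss/-to options), and YoutubeIE's subtitle extraction gains human-readable track names, per-language accumulation of tracks, and '-orig'-labelled automatic captions.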
@@ -64,6 +64,9 @@ def _get_suitable_downloader(info_dict, params={}):
     if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
         return FFmpegFD
 
+    if params.get('_clip_args'):
+        return FFmpegFD
+
     return PROTOCOL_MAP.get(protocol, HttpFD)
 
 
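Outside the diff, a minimal sketch of what this selection change means in practice: once an extractor has put '_clip_args' into the params dict, _get_suitable_downloader (youtube_dl/downloader/__init__.py) returns FFmpegFD even for plain HTTP formats, so ffmpeg can do the trimming. The info_dict and params values below are invented.

    # Sketch only; the dicts are made up, the imported names are youtube-dl's own.
    from youtube_dl.downloader import _get_suitable_downloader
    from youtube_dl.downloader.external import FFmpegFD

    info_dict = {'url': 'https://example.invalid/v.mp4', 'protocol': 'https'}  # would normally map to HttpFD
    params = {'_clip_args': '12000ms,34000ms'}  # set by the clip extractor (see the YoutubeTabIE hunks below)

    assert _get_suitable_downloader(info_dict, params) is FFmpegFD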
@@ -469,6 +469,10 @@ class FFmpegFD(ExternalFD):
             elif isinstance(conn, compat_str):
                 args += ['-rtmp_conn', conn]
 
+        ss = self.params.get('_clip_args', '').split(',')
+        if len(ss) == 2:
+            args += ['-ss', ss[0], '-to', ss[1]]
+
         args += ['-i', url, '-c', 'copy']
 
         if self.params.get('test', False):
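For orientation (not part of the diff): the -ss/-to pair goes in ahead of -i, so ffmpeg starts and stops reading at the clip boundaries (input-side seeking; -to as an input option needs a reasonably recent ffmpeg), and -c copy keeps it a remux rather than a re-encode. A standalone sketch of the argument list this builds, with an invented '_clip_args' value:

    # Mirrors the logic added above, outside the FFmpegFD class; values invented.
    params = {'_clip_args': '12000ms,34000ms'}

    args = []
    ss = params.get('_clip_args', '').split(',')
    if len(ss) == 2:
        args += ['-ss', ss[0], '-to', ss[1]]
    args += ['-i', 'https://example.invalid/v.mp4', '-c', 'copy']

    print(args)
    # ['-ss', '12000ms', '-to', '34000ms', '-i', 'https://example.invalid/v.mp4', '-c', 'copy']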
@@ -2344,8 +2344,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             player_response,
             lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
         if pctr:
-            def process_language(container, base_url, lang_code, query):
-                lang_subs = []
+            def process_language(container, base_url, lang_code, sub_name, query):
+                lang_subs = container.setdefault(lang_code, [])
                 for fmt in self._SUBTITLE_FORMATS:
                     query.update({
                         'fmt': fmt,
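A small illustration (not from the patch) of the behavioural change in process_language: container.setdefault() lets several tracks accumulate under one language key instead of the last one overwriting the list, and each entry now carries a display name. The URLs and a scenario of two source tracks translated to the same language are invented; the format tuple mirrors YoutubeIE._SUBTITLE_FORMATS.

    # Simplified stand-in for the new process_language (query handling omitted).
    SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')

    def process_language(container, base_url, lang_code, sub_name, query):
        lang_subs = container.setdefault(lang_code, [])
        for fmt in SUBTITLE_FORMATS:
            lang_subs.append({'ext': fmt, 'url': base_url + '&fmt=' + fmt, 'name': sub_name})

    automatic_captions = {}
    # German auto-translation generated from two different source tracks:
    process_language(automatic_captions, 'https://example.invalid/timedtext?lang=en&tlang=de', 'de', 'German from English', {})
    process_language(automatic_captions, 'https://example.invalid/timedtext?lang=fr&tlang=de', 'de', 'German from French', {})
    assert len(automatic_captions['de']) == 2 * len(SUBTITLE_FORMATS)  # both kept under 'de'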
@@ -2353,29 +2354,49 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     lang_subs.append({
                         'ext': fmt,
                         'url': update_url_query(base_url, query),
+                        'name': sub_name,
                     })
-                container[lang_code] = lang_subs
 
-            subtitles = {}
-            for caption_track in (pctr.get('captionTracks') or []):
+            def get_lang_code(track):
+                return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
+                        or track.get('languageCode'))
+
+            def get_text(x):
+                return try_get(x, (lambda x: x['simpleText'],
+                                   lambda x: x['runs'][0]['text']), compat_str)
+
+            subtitles, automatic_captions = {}, {}
+            for lang_code, caption_track in dict(
+                    (get_lang_code(sub), sub) for sub in (pctr.get('captionTracks') or [])).items():
                 base_url = caption_track.get('baseUrl')
                 if not base_url:
                     continue
+                lang_name = get_text(caption_track) or lang_code
                 if caption_track.get('kind') != 'asr':
-                    lang_code = caption_track.get('languageCode')
                     if not lang_code:
                         continue
                     process_language(
-                        subtitles, base_url, lang_code, {})
-                    continue
-                automatic_captions = {}
-                for translation_language in (pctr.get('translationLanguages') or []):
-                    translation_language_code = translation_language.get('languageCode')
-                    if not translation_language_code:
+                        subtitles, base_url, lang_code, lang_name, {})
+                    if not caption_track.get('isTranslatable'):
+                        continue
+                for trans_code, trans_name in dict(
+                        (lang.get('languageCode'), get_text(lang.get('languageName')))
+                        for lang in pctr.get('translationLanguages') or []).items():
+                    if not trans_code:
                         continue
-                    process_language(
-                        automatic_captions, base_url, translation_language_code,
-                        {'tlang': translation_language_code})
+                    if caption_track.get('kind') != 'asr':
+                        trans_code += '-' + lang_code
+                        trans_name += ' from ' + lang_name
+                    # Add "-orig" label to the original language so that it can be distinguished
+                    # The subs are returned without "-orig" as well for compatibility
+                    if lang_code == 'a-' + trans_code:
+                        process_language(
+                            automatic_captions, base_url, trans_code + '-orig', trans_name + ' (Original)', {})
+                    # Setting tlang=lang returns damaged subtitles.
+                    # Not using lang_code == f'a-{trans_code}' here for future-proofing
+                    orig_lang = parse_qs(base_url).get('lang', [None])[-1]
+                    process_language(automatic_captions, base_url, trans_code, trans_name,
+                                     {} if orig_lang == trans_code else {'tlang': trans_code})
             info['automatic_captions'] = automatic_captions
             info['subtitles'] = subtitles
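To make the new naming scheme concrete, here is a standalone sketch (not from the patch) of get_lang_code and the keys it produces; the vssId values follow YouTube's usual shapes ('.en' for uploader subtitles, 'a.en' for ASR), but the sample tracks are invented.

    # Simplified restatement of the helper above (remove_start inlined).
    def get_lang_code(track):
        vss_id = track.get('vssId') or ''
        if vss_id.startswith('.'):
            vss_id = vss_id[1:]
        return vss_id.replace('.', '-') or track.get('languageCode')

    manual_en = {'vssId': '.en', 'languageCode': 'en'}               # uploader-provided subtitles
    asr_en = {'vssId': 'a.en', 'languageCode': 'en', 'kind': 'asr'}  # automatic captions

    print(get_lang_code(manual_en))  # 'en'   -> key in info['subtitles']
    print(get_lang_code(asr_en))     # 'a-en' -> for trans_code 'en' this satisfies
                                     #           lang_code == 'a-' + trans_code, so the
                                     #           ASR original is also exposed as 'en-orig'

Translations derived from a manual track get suffixed keys and names ('de-en', 'German from English'), translations of the ASR track keep the bare code ('de'), and tlang is only added to the query when the requested language differs from the track's own lang parameter.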
@@ -3456,6 +3477,38 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
 
+    def _process_clip(self, url, data):
+        renderer = try_get(
+            data,
+            lambda x: next(
+                (r for r in (s.get('engagementPanelSectionListRenderer')
+                             for s in x['engagementPanels'])
+                 if (r or {}).get('panelIdentifier') == 'engagement-panel-clip-view'),
+                None), dict)
+        if not renderer:
+            return
+        clip_commands = try_get(
+            renderer,
+            lambda x: x['content']['clipSectionRenderer']['contents'][0]['clipAttributionRenderer']['onScrubExit']['commandExecutorCommand']['commands'],
+            list) or []
+        clip_open = try_get(
+            clip_commands,
+            lambda x: next((c['openPopupAction'] for c in x if c.get('openPopupAction')), None),
+            dict)
+        clip_open_commands = try_get(
+            clip_open,
+            lambda x: x['popup']['notificationActionRenderer']['actionButton']['buttonRenderer']['command']['commandExecutorCommand']['commands'],
+            list) or []
+        clip_data = try_get(
+            clip_open_commands,
+            lambda x: next((c['loopCommand'] for c in x if c.get('loopCommand')), None),
+            dict)
+        if clip_data.get('postId') in url:
+            clip_times = (int_or_none(clip_data.get('startTimeMs')),
+                          int_or_none(clip_data.get('endTimeMs')))
+            if None not in clip_times:
+                self._downloader.params['_clip_args'] = '%gms,%gms' % clip_times
+
     def _real_extract(self, url):
         item_id = self._match_id(url)
         url = update_url(url, netloc='www.youtube.com')
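For reference (not part of the diff), the JSON path the new _process_clip walks and the value it ends up storing; the key names come from the lambdas above, while the clip id and times below are invented.

    # Path walked, one try_get step at a time:
    #   data['engagementPanels'][*]['engagementPanelSectionListRenderer']
    #       where panelIdentifier == 'engagement-panel-clip-view'
    #     ['content']['clipSectionRenderer']['contents'][0]['clipAttributionRenderer']
    #       ['onScrubExit']['commandExecutorCommand']['commands'][*]['openPopupAction']
    #     ['popup']['notificationActionRenderer']['actionButton']['buttonRenderer']
    #       ['command']['commandExecutorCommand']['commands'][*]['loopCommand']
    #
    # The final loopCommand payload looks roughly like this (values invented):
    clip_data = {'postId': 'UgkxExampleClipId', 'startTimeMs': '12000', 'endTimeMs': '34000'}

    clip_times = (int(clip_data['startTimeMs']), int(clip_data['endTimeMs']))
    print('%gms,%gms' % clip_times)  # '12000ms,34000ms' -> stored as self._downloader.params['_clip_args']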
@@ -3484,6 +3537,8 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
+            if item_id == 'clip':
+                self._process_clip(url, data)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
         # Capture and output alerts
         alert = self._extract_alert(data)
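Combined with the downloader hunks above, this closes the loop for clip URLs: when item_id resolves to 'clip' (i.e. for youtube.com/clip/<clip-id> URLs), _process_clip records the clip window in the downloader params, the resolved video id is handed to YoutubeIE via url_result, _get_suitable_downloader then forces FFmpegFD because '_clip_args' is set, and FFmpegFD trims the download with -ss/-to while stream-copying.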