Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-11-14 05:17:25 +00:00)
[cda] Improve and simplify (Closes #8805)
commit f1ced6df51
parent 8b0d7a66ef
youtube_dl/extractor/cda.py
@@ -12,9 +12,8 @@ from ..utils import (
 
 
 class CDAIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:(?:www|ebd)\.)?cda\.pl/(?:video|[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
-    _TESTS = [
-        {
+    _VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
+    _TESTS = [{
         'url': 'http://www.cda.pl/video/5749950c',
         'md5': '6f844bf51b15f31fae165365707ae970',
         'info_dict': {
@@ -24,8 +23,7 @@ class CDAIE(InfoExtractor):
             'title': 'Oto dlaczego przed zakrętem należy zwolnić.',
             'duration': 39
         }
-        },
-        {
+    }, {
         'url': 'http://www.cda.pl/video/57413289',
         'md5': 'a88828770a8310fc00be6c95faf7f4d5',
         'info_dict': {
@@ -34,8 +32,10 @@ class CDAIE(InfoExtractor):
             'title': 'Lądowanie na lotnisku na Maderze',
             'duration': 137
         }
-        }
-    ]
+    }, {
+        'url': 'http://ebd.cda.pl/0x0/5749950c',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
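
Aside (illustrative, not part of the patch): the reworked _VALID_URL from the first hunk binds each host to its own path shape — www.cda.pl (or bare cda.pl) only with /video/, ebd.cda.pl only with the <width>x<height> embed path — and the new 'only_matching' test case above exercises exactly that (such tests only assert that the URL is accepted by the extractor; nothing is downloaded). A quick sanity check of the new pattern:

    import re

    # New pattern introduced by this commit (copied from the diff above).
    _VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'

    for url in ('http://www.cda.pl/video/5749950c',   # video page -> id 5749950c
                'http://ebd.cda.pl/0x0/5749950c',     # embed page -> id 5749950c
                'http://ebd.cda.pl/video/5749950c'):  # mixed form, no longer accepted
        m = re.match(_VALID_URL, url)
        print(url, '->', m.group('id') if m else 'no match')
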
@@ -44,53 +44,53 @@ class CDAIE(InfoExtractor):
         if 'Ten film jest dostępny dla użytkowników premium' in webpage:
             raise ExtractorError('This video is only available for premium users.', expected=True)
 
-        title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title', fatal=False)
+        title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title')
 
-        def _get_format(page, version=''):
-            unpacked = decode_packed_codes(page)
-            duration = self._search_regex(r"duration:\\'(.+?)\\'", unpacked, 'duration', fatal=False)
-            format_id = None
-            height = None
-
-            m = re.search(r'<a data-quality="(?P<format_id>[^"]+)" href="[^"]+" class="quality-btn quality-btn-active">(?P<height>[0-9]+)p<\/a>', page)
-            if m:
-                format_id = m.group('format_id')
-                height = int(m.group('height'))
-
-            url = self._search_regex(r"url:\\'(.+?)\\'", unpacked, version + ' url', fatal=False)
-            if url is None:
-                return None
-
-            return {
-                'format_id': format_id,
-                'height': height,
-                'url': url
-            }, parse_duration(duration)
-
         formats = []
 
-        format_desc, duration = _get_format(webpage) or (None, None)
-        if format_desc is not None:
-            formats.append(format_desc)
-
-        pattern = re.compile(r'<a data-quality="[^"]+" href="([^"]+)" class="quality-btn">([0-9]+p)<\/a>')
-        for version in re.findall(pattern, webpage):
-            webpage = self._download_webpage(version[0], video_id, 'Downloading %s version information' % version[1], fatal=False)
-            if not webpage:
-                # Manually report warning because empty page is returned when invalid version is requested.
-                self.report_warning('Unable to download %s version information' % version[1])
-                continue
-
-            format_desc, duration_ = _get_format(webpage, version[1]) or (None, None)
-            duration = duration or duration_
-            if format_desc is not None:
-                formats.append(format_desc)
-
-        self._sort_formats(formats)
-
-        return {
+        info_dict = {
             'id': video_id,
             'title': title,
             'formats': formats,
-            'duration': duration
+            'duration': None,
         }
+
+        def extract_format(page, version):
+            unpacked = decode_packed_codes(page)
+            format_url = self._search_regex(
+                r"url:\\'(.+?)\\'", unpacked, '%s url' % version, fatal=False)
+            if not format_url:
+                return
+            f = {
+                'url': format_url,
+            }
+            m = re.search(
+                r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
+                page)
+            if m:
+                f.update({
+                    'format_id': m.group('format_id'),
+                    'height': int(m.group('height')),
+                })
+            info_dict['formats'].append(f)
+            if not info_dict['duration']:
+                info_dict['duration'] = parse_duration(self._search_regex(
+                    r"duration:\\'(.+?)\\'", unpacked, 'duration', fatal=False))
+
+        extract_format(webpage, 'default')
+
+        for href, resolution in re.findall(
+                r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
+                webpage):
+            webpage = self._download_webpage(
+                href, video_id, 'Downloading %s version information' % resolution, fatal=False)
+            if not webpage:
+                # Manually report warning because empty page is returned when
+                # invalid version is requested.
+                self.report_warning('Unable to download %s version information' % resolution)
+                continue
+            extract_format(webpage, resolution)
+
+        self._sort_formats(formats)
+
+        return info_dict
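
Design note on the last hunk (an editorial sketch, not code from the commit): the old _get_format returned a (format dict, duration) tuple that the caller had to unpack and merge for every quality, whereas the new extract_format is a closure over info_dict — it appends straight into info_dict['formats'] and fills info_dict['duration'] only the first time a page exposes it. A minimal standalone sketch of that pattern, using hypothetical page data rather than real cda.pl markup:

    import re

    def build_info_dict(pages):
        # Shared result; the nested function mutates it in place, the way
        # extract_format() does with info_dict in the patched extractor.
        info_dict = {'formats': [], 'duration': None}

        def extract_format(page, version):
            m = re.search(r"url:'(.+?)'", page)
            if not m:
                return  # no usable format on this page, mirror the early return
            info_dict['formats'].append({'url': m.group(1), 'format_id': version})
            if not info_dict['duration']:
                d = re.search(r"duration:'(.+?)'", page)
                if d:
                    info_dict['duration'] = d.group(1)

        for version, page in pages.items():
            extract_format(page, version)
        return info_dict

    # Hypothetical unpacked player data for two qualities:
    pages = {
        'default': "url:'http://example.com/v_480.mp4' duration:'0:39'",
        '720p': "url:'http://example.com/v_720.mp4'",
    }
    print(build_info_dict(pages))
    # {'formats': [{'url': 'http://example.com/v_480.mp4', 'format_id': 'default'},
    #              {'url': 'http://example.com/v_720.mp4', 'format_id': '720p'}],
    #  'duration': '0:39'}

Because the closure owns the shared dict, the per-quality loop in the extractor body shrinks to a single extract_format(webpage, resolution) call per quality page.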