1
0
mirror of https://github.com/ytdl-org/youtube-dl.git synced 2024-12-22 16:06:49 +00:00

Align PR with merged yt-dlp code

This commit is contained in:
dirkf 2023-05-01 15:14:38 +00:00 committed by GitHub
parent d1dbd37b09
commit b028b2fa27
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -1,27 +1,21 @@
# coding: utf-8
from __future__ import unicode_literals
import sys
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_kwargs,
compat_str,
)
from ..utils import (
error_to_compat_str,
ExtractorError,
HEADRequest,
float_or_none,
GeoRestrictedError,
int_or_none,
merge_dicts,
parse_duration,
parse_iso8601,
smuggle_url,
traverse_obj,
update_url_query,
url_or_none,
variadic,
)
@ -31,7 +25,7 @@ class SBSIE(InfoExtractor):
https?://(?:www\.)?sbs\.com\.au/(?:
ondemand(?:
/video/(?:single/)?|
/movie/[^/]+/|
/(?:movie|tv-program)/[^/]+/|
/(?:tv|news)-series/(?:[^/]+/){3}|
.*?\bplay=|/watch/
)|news/(?:embeds/)?video/
@ -59,6 +53,8 @@ class SBSIE(InfoExtractor):
'timestamp': 1408613220,
'upload_date': '20140821',
'uploader': 'SBSC',
'tags': None,
'categories': None,
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
@ -92,67 +88,11 @@ class SBSIE(InfoExtractor):
}, {
'url': 'https://www.sbs.com.au/ondemand/tv-series/the-handmaids-tale/season-5/the-handmaids-tale-s5-ep1/2065631811776',
'only_matching': True,
}, {
'url': 'https://www.sbs.com.au/ondemand/tv-program/autun-romes-forgotten-sister/2116212803602',
'only_matching': True,
}]
def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
    # Report a failed page request the same way InfoExtractor does:
    # errnote=False silences the error entirely, errnote=None falls back
    # to a generic message. Raises when fatal, otherwise warns and
    # returns False so the caller can bail out.
    if errnote is False:
        return False
    note = 'Unable to download webpage' if errnote is None else errnote
    errmsg = '%s: %s' % (note, error_to_compat_str(err))
    if not fatal:
        # Non-fatal: surface a warning and signal failure to the caller.
        self._downloader.report_warning(errmsg)
        return False
    raise ExtractorError(errmsg, sys.exc_info()[2], cause=err, video_id=video_id)
def _download_webpage_handle(self, url, video_id, *args, **kwargs):
    """Download a webpage, intercepting HTTP 403 to detect SBS geo-blocking.

    Positional args mirror InfoExtractor._download_webpage_handle:
    note, errnote, fatal, encoding, data, headers, query, expected_status.
    Returns (webpage, urlh) on success, False on (non-fatal) failure.
    """
    # note, errnote, fatal, encoding, data, headers, query, expected_status
    # specialised to detect geo-block
    errnote = args[2] if len(args) > 2 else kwargs.get('errnote')
    fatal = args[3] if len(args) > 3 else kwargs.get('fatal')
    exp = args[7] if len(args) > 7 else kwargs.get('expected_status')
    # add 403 to expected codes for interception
    exp = variadic(exp or [], allowed_types=(compat_str, ))
    if 403 not in exp and '403' not in exp:
        exp = list(exp)
        exp.append(403)
    else:
        # caller already expects 403: no argument patching needed
        exp = None
    if exp:
        # write the augmented expected_status back into args/kwargs,
        # whichever of the two the caller used
        if len(args) > 7:
            args = list(args)
            args[7] = exp
        else:
            kwargs['expected_status'] = exp
            kwargs = compat_kwargs(kwargs)
    ret = super(SBSIE, self)._download_webpage_handle(url, video_id, *args, **kwargs)
    if ret is False:
        return ret
    webpage, urlh = ret
    if urlh.getcode() == 403:
        # SBS signals geo-restriction via this response header
        if urlh.headers.get('x-error-reason') == 'geo-blocked':
            countries = ['AU']
            if fatal:
                self.raise_geo_restricted(countries=countries)
            err = GeoRestrictedError(
                'This Australian content is not available from your location due to geo restriction',
                countries=countries)
        else:
            # plain 403: re-wrap as an HTTPError for uniform handling
            err = compat_HTTPError(urlh.geturl(), 403, 'HTTP Error 403: Forbidden', urlh.headers, urlh)
        ret = self.__handle_request_webpage_error(err, video_id, errnote, fatal)
        if exp:
            # caller doesn't expect 403
            return False
    return ret
def _extract_m3u8_formats(self, m3u8_url, video_id, *args, **kwargs):
# ext, entry_protocol, preference, m3u8_id, note, errnote, fatal,
# live, data, headers, query
@ -168,24 +108,28 @@ class SBSIE(InfoExtractor):
return super(SBSIE, self)._extract_m3u8_formats(m3u8_url, video_id, *args, **kwargs)
_GEO_COUNTRIES = ['AU']
# naming for exportability
# Map Australian TV classification codes to a minimum viewer age.
# NOTE: the source carried both 'M': 15 (removed line) and 'M': 14 (added
# line) from a diff-scrape artifact; the merged yt-dlp value 14 is kept.
AUS_TV_PARENTAL_GUIDELINES = {
    'P': 0,
    'C': 7,
    'G': 0,
    'PG': 0,
    'M': 14,
    'MA15+': 15,
    'AV15+': 15,
    'MAV15+': 15,
    'R18+': 18,
    'NC': 0,  # not classified (unofficial, used by SBS)
}
_PLAYER_API = 'https://www.sbs.com.au/api/v3'
_CATALOGUE_API = 'https://catalogue.pr.sbsod.com/'
_VOD_BASE_URL = 'https://sbs-vod-prod-01.akamaized.net/'
def _call_api(self, video_id, path, query=None, data=None, headers=None, fatal=True):
    # GET `path` from the catalogue API; returns {} on (non-fatal) failure.
    # The source held both the pre-merge line (headers=headers) and the
    # merged line (headers=headers or {}) — the merged version is kept.
    return self._download_json(update_url_query(
        self._CATALOGUE_API + path, query),
        video_id, headers=headers or {}, fatal=fatal) or {}
def _get_smil_url(self, video_id):
return update_url_query(
@ -194,13 +138,23 @@ class SBSIE(InfoExtractor):
def _get_player_data(self, video_id, headers=None, fatal=False):
    # Fetch player stream metadata for `video_id`; {} when unavailable.
    # The source held both the pre-merge line (headers=headers) and the
    # merged line (headers=headers or {}) — the merged version is kept.
    # NOTE(review): _PLAYER_API has no trailing '/' — verify the URL join
    # with 'video_stream' against the working extractor.
    return self._download_json(update_url_query(
        self._PLAYER_API + 'video_stream', {'id': video_id, 'context': 'tv'}),
        video_id, headers=headers or {}, fatal=fatal) or {}
def _real_extract(self, url):
video_id = self._match_id(url)
# get media links directly though later metadata may contain contentUrl
smil_url = self._get_smil_url(video_id)
formats = self._extract_smil_formats(smil_url, video_id, fatal=False) or []
if not formats:
urlh = self._request_webpage(
HEADRequest(self._VOD_BASE_URL), video_id,
note='Checking geo-restriction', fatal=False, expected_status=403)
if urlh:
error_reasons = urlh.headers.get_all('x-error-reason') or []
if 'geo-blocked' in error_reasons:
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
# try for metadata from the same source
@ -231,72 +185,50 @@ class SBSIE(InfoExtractor):
return result
def traverse_media(*args, **kwargs):
nkwargs = None
if 'expected_type' not in kwargs:
kwargs['expected_type'] = txt_or_none
kwargs = compat_kwargs(kwargs)
nkwargs = kwargs
if 'get_all' not in kwargs:
kwargs['get_all'] = False
nkwargs = kwargs
if nkwargs:
kwargs = compat_kwargs(nkwargs)
return traverse_obj(media, *args, **kwargs)
return {
# For named episodes, use the catalogue's title to set episode, rather than generic 'Episode N'.
if traverse_media('partOfSeries', expected_type=dict):
media['epName'] = traverse_media('title')
return merge_dicts(*reversed(({
'id': video_id,
'title': traverse_media(('displayTitles', Ellipsis, 'title'),
get_all=False) or media['title'],
'formats': formats,
'description': traverse_media('description'),
'categories': traverse_media(
('genres', Ellipsis), ('taxonomy', ('genre', 'subgenre'), 'name')),
'tags': traverse_media(
(('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice')), Ellipsis)),
}, dict((k, traverse_media(v)) for k, v in {
'title': 'name',
'description': 'description',
'channel': ('taxonomy', 'channel', 'name'),
'series': ((('partOfSeries', 'name'), 'seriesTitle')),
'series_id': ((('partOfSeries', 'uuid'), 'seriesID')),
'episode': 'epName',
}.items()), {
'season_number': traverse_media((('partOfSeries', None), 'seasonNumber'), expected_type=int_or_none),
'episode_number': traverse_media('episodeNumber', expected_type=int_or_none),
'timestamp': traverse_media('datePublished', ('publication', 'startDate'),
expected_type=parse_iso8601),
'release_year': traverse_media('releaseYear', expected_type=int_or_none),
'duration': traverse_media('duration', expected_type=really_parse_duration),
'is_live': traverse_media('liveStream', expected_type=bool),
'age_limit': self.AUS_TV_PARENTAL_GUIDELINES.get(traverse_media(
'classificationID', 'contentRating', default='').upper()),
'categories': traverse_media(
('genres', Ellipsis), ('taxonomy', ('genre', 'subgenre'), 'name'),
get_all=True) or None,
'tags': traverse_media(
(('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice')), Ellipsis),
get_all=True) or None,
'thumbnails': traverse_media(('thumbnails', Ellipsis),
expected_type=xlate_thumb),
'duration': traverse_media('duration',
expected_type=really_parse_duration),
'series': traverse_media(('partOfSeries', 'name'), 'seriesTitle'),
'series_id': traverse_media(('partOfSeries', 'uuid'), 'seriesID'),
'season_number': traverse_media(
(('partOfSeries', None), 'seasonNumber'),
expected_type=int_or_none, get_all=False),
'episode_number': traverse_media('episodeNumber',
expected_type=int_or_none),
'release_year': traverse_media('releaseYear',
expected_type=int_or_none),
'timestamp': traverse_media(
'datePublished', ('publication', 'startDate'),
expected_type=parse_iso8601),
'channel': traverse_media(('taxonomy', 'channel', 'name')),
expected_type=xlate_thumb, get_all=True),
'formats': formats,
# TODO: _extract_smil_formats_and_subtitles()
# 'subtitles': subtitles,
'uploader': 'SBSC',
}
# just come behind the shed with me, mate
# (i.e. legacy path kept for reference; slated for removal)
def _old_real_extract(self, url):
    """Legacy extraction via the video_pdkvars API (ThePlatform SMIL URL)."""
    video_id = self._match_id(url)
    player_params = self._download_json(
        'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id)
    error = player_params.get('error')
    if error:
        # map API error codes to user-facing messages
        error_message = 'Sorry, The video you are looking for does not exist.'
        video_data = error.get('results') or {}
        error_code = error.get('errorCode')
        if error_code == 'ComingSoon':
            error_message = '%s is not yet available.' % video_data.get('title', '')
        elif error_code in ('Forbidden', 'intranetAccessOnly'):
            error_message = 'Sorry, This video cannot be accessed via this website'
        elif error_code == 'Expired':
            error_message = 'Sorry, %s is no longer available.' % video_data.get('title', '')
        raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
    # first usable release URL, in preference order
    media_url = traverse_obj(
        player_params, ('releaseUrls', ('progressive', 'html', 'standard', 'htmlandroid')),
        expected_type=url_or_none)
    if not media_url:
        # NOTE(review): message 'No' looks truncated by the page scrape —
        # confirm the full text against the upstream PR before relying on it
        raise ExtractorError('No', expected=True)
    return {
        '_type': 'url_transparent',
        # 'ie_key': 'ThePlatform',
        'id': video_id,
        'url': smuggle_url(self._proto_relative_url(media_url), {'force_smil_url': True}),
        'is_live': player_params.get('streamType') == 'live',
    }
})))