mirror of https://github.com/ytdl-org/youtube-dl.git synced 2024-06-26 00:27:57 +00:00

Compare commits


14 Commits

Author SHA1 Message Date
dirkf
7f26bf0251
Merge ab5617be9e into 0153b387e5 2024-06-14 09:21:55 +00:00
Paper
0153b387e5
[VidLii] Add 720p support (#30924)
* [VidLii] Add HD support  (yt-dlp backport-ish)

* Also fix a bug with the view count

---------

Co-authored-by: dirkf <fieldhouse@gmx.net>
2024-06-11 13:21:39 +01:00
dirkf
a48fe7491d [ORF] Skip tests with limited availability 2024-06-11 12:52:13 +01:00
dirkf
e20ca543f0 [ORF] Re-factor and update ORFFM4StoryIE
* fix getting media via DASH instead of inaccessible mp4
* also get in-page YT media
2024-06-11 12:52:13 +01:00
dirkf
e39466051f [ORF] Support sound.orf.at, updating ORFRadioIE
* maintain support for xx.orf.at/player/... URLs
* add `ORFRadioCollectionIE` to support playlists in ORF Sound
* back-port and re-work `ORFPodcastIE` from https://github.com/yt-dlp/yt-dlp/pull/8486, thx Esokrates
2024-06-11 12:52:13 +01:00
dirkf
d95c0d203f [ORF] Support on.orf.at, replacing ORFTVthekIE
* add `ORFONIE`, back-porting yt-dlp PR https://github.com/yt-dlp/yt-dlp/pull/9113 and friends: thx HobbyistDev, TuxCoder, seproDev
* re-factor to support livestreams via new `ORFONliveIE`
2024-06-11 12:52:13 +01:00
dirkf
3bde6a5752 [test] Improve download test
* skip reason can't be unicode in Py2
* remove duplicate assert...Equal functions
2024-06-11 12:52:13 +01:00
dirkf
50f6c5668a [core] Re-factor with _fill_common_fields() as used in yt-dlp 2024-06-11 12:52:13 +01:00
dirkf
b4ff08bd2d [core] Safer handling of nested playlist data 2024-06-11 12:52:13 +01:00
kmnx
88bd8b9f87
[mixcloud] updated mixcloud API server address (#32557)
* updated mixcloud API server address
* fix tests
* etc

---------

Co-authored-by: dirkf <fieldhouse@gmx.net>
2024-06-11 12:38:24 +01:00
dirkf
ab5617be9e
Update youtube_dl/extractor/sbs.py 2023-05-01 16:22:29 +01:00
dirkf
b028b2fa27
Align PR with merged yt-dlp code 2023-05-01 15:14:38 +00:00
dirkf
d1dbd37b09 [SBS] Updates for review and rethink 2023-03-19 12:14:07 +00:00
dirkf
bcb8143f1d [extractor/sbs] Overhaul extractor for new APIs 2023-03-19 01:41:27 +00:00
8 changed files with 975 additions and 600 deletions

View File: test/helper.py

@@ -5,9 +5,9 @@ import hashlib
 import json
 import os.path
 import re
-import types
 import ssl
 import sys
+import types
 import unittest
 
 import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
         op, _, expected_num = expected.partition(':')
         expected_num = int(expected_num)
         if op == 'mincount':
-            assert_func = assertGreaterEqual
+            assert_func = self.assertGreaterEqual
             msg_tmpl = 'Expected %d items in field %s, but only got %d'
         elif op == 'maxcount':
-            assert_func = assertLessEqual
+            assert_func = self.assertLessEqual
             msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
         elif op == 'count':
-            assert_func = assertEqual
+            assert_func = self.assertEqual
             msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
         else:
             assert False
         assert_func(
-            self, len(got), expected_num,
+            len(got), expected_num,
             msg_tmpl % (expected_num, field, len(got)))
         return
     self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
         self.assertTrue(m, msg)
 
 
-def assertGreaterEqual(self, got, expected, msg=None):
-    if not (got >= expected):
-        if msg is None:
-            msg = '%r not greater than or equal to %r' % (got, expected)
-        self.assertTrue(got >= expected, msg)
-
-
-def assertLessEqual(self, got, expected, msg=None):
-    if not (got <= expected):
-        if msg is None:
-            msg = '%r not less than or equal to %r' % (got, expected)
-        self.assertTrue(got <= expected, msg)
-
-
-def assertEqual(self, got, expected, msg=None):
-    if not (got == expected):
-        if msg is None:
-            msg = '%r not equal to %r' % (got, expected)
-        self.assertTrue(got == expected, msg)
-
-
 def expect_warnings(ydl, warnings_re):
     real_warning = ydl.report_warning
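
The removed helpers duplicated assertions that unittest.TestCase already provides as bound methods, which is why the callers above switch to self.assertGreaterEqual and friends. A minimal standalone sketch of the replacement pattern (hypothetical test, not part of this diff):

import unittest


class ExampleTest(unittest.TestCase):
    def test_counts(self):
        got = [1, 2, 3]
        # Bound TestCase methods need no explicit `self` argument threaded
        # through, and produce a usable default message when `msg` is omitted.
        self.assertGreaterEqual(len(got), 2, 'Expected at least 2 items')
        self.assertLessEqual(len(got), 5)
        self.assertEqual(len(got), 3)


if __name__ == '__main__':
    unittest.main()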

View File: test/test_download.py

@@ -9,8 +9,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
-    assertGreaterEqual,
-    assertLessEqual,
     expect_warnings,
     get_params,
     gettestcases,
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
     ExtractorError,
     error_to_compat_str,
     format_bytes,
+    IDENTITY,
     preferredencoding,
     UnavailableVideoError,
 )
 from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+# Some unittest APIs require actual str
+if not isinstance('TEST', str):
+    _encode_str = lambda s: s.encode(preferredencoding())
+else:
+    _encode_str = IDENTITY
+
 
 class YoutubeDL(youtube_dl.YoutubeDL):
     def __init__(self, *args, **kwargs):
@@ -102,7 +108,7 @@ def generator(test_case, tname):
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
-            self.skipTest(reason)
+            self.skipTest(_encode_str(reason))
 
         if not ie.working():
             print_skipping('IE marked as not _WORKING')
@@ -187,16 +193,14 @@ def generator(test_case, tname):
             expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
 
             if 'playlist_mincount' in test_case:
-                assertGreaterEqual(
-                    self,
+                self.assertGreaterEqual(
                     len(res_dict['entries']),
                     test_case['playlist_mincount'],
                     'Expected at least %d in playlist %s, but got only %d' % (
                         test_case['playlist_mincount'], test_case['url'],
                         len(res_dict['entries'])))
             if 'playlist_maxcount' in test_case:
-                assertLessEqual(
-                    self,
+                self.assertLessEqual(
                     len(res_dict['entries']),
                     test_case['playlist_maxcount'],
                     'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
                     if params.get('test'):
                         expected_minsize = max(expected_minsize, 10000)
                     got_fsize = os.path.getsize(tc_filename)
-                    assertGreaterEqual(
-                        self, got_fsize, expected_minsize,
+                    self.assertGreaterEqual(
+                        got_fsize, expected_minsize,
                         'Expected %s to be at least %s, but it\'s only %s ' %
                         (tc_filename, format_bytes(expected_minsize),
                          format_bytes(got_fsize)))
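
The _encode_str shim above exists because on Python 2 str is the byte-string type, so skipTest() can choke on a unicode skip reason; on Python 3 the reason passes through unchanged. A standalone sketch of the idea, using locale.getpreferredencoding in place of youtube_dl's preferredencoding and a local stand-in for IDENTITY:

import locale


def identity(x):  # local stand-in for youtube_dl.utils.IDENTITY
    return x


if not isinstance('TEST', str):  # true only on Python 2, where str is bytes
    def _encode_str(s):
        return s.encode(locale.getpreferredencoding())
else:
    _encode_str = identity

# On Python 3 the reason is returned unchanged; on Python 2 it becomes an
# encoded byte string that unittest's skipTest() accepts.
print(_encode_str('IE marked as not _WORKING'))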

View File: youtube_dl/YoutubeDL.py

@@ -1039,8 +1039,8 @@ class YoutubeDL(object):
         elif result_type in ('playlist', 'multi_video'):
             # Protect from infinite recursion due to recursively nested playlists
             # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
-            webpage_url = ie_result['webpage_url']
-            if webpage_url in self._playlist_urls:
+            webpage_url = ie_result.get('webpage_url')  # not all pl/mv have this
+            if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
                     '[download] Skipping already downloaded playlist: %s'
                     % ie_result.get('title') or ie_result.get('id'))
@@ -1048,6 +1048,10 @@ class YoutubeDL(object):
 
             self._playlist_level += 1
             self._playlist_urls.add(webpage_url)
+            new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
+            if new_result:
+                new_result.update(ie_result)
+                ie_result = new_result
             try:
                 return self.__process_playlist(ie_result, download)
             finally:
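
Together, these two hunks make the recursion guard tolerate playlist results that lack a webpage_url and merge extra_info into the result without clobbering keys the result already carries. A simplified, self-contained sketch of the guard pattern (illustrative names; not the real YoutubeDL implementation):

class PlaylistProcessor(object):
    def __init__(self):
        self._playlist_urls = set()
        self._playlist_level = 0

    def process_playlist(self, ie_result, expand, extra_info):
        webpage_url = ie_result.get('webpage_url')  # not all results carry one
        if webpage_url and webpage_url in self._playlist_urls:
            return None  # already being processed: break the recursion
        # merge extra_info without overwriting keys the result already has
        new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
        if new_result:
            new_result.update(ie_result)
            ie_result = new_result
        self._playlist_level += 1
        if webpage_url:
            self._playlist_urls.add(webpage_url)
        try:
            return expand(ie_result)
        finally:
            self._playlist_level -= 1
            if self._playlist_level == 0:
                self._playlist_urls.clear()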
@@ -1593,6 +1597,28 @@ class YoutubeDL(object):
             self.cookiejar.add_cookie_header(pr)
             return pr.get_header('Cookie')
 
+    def _fill_common_fields(self, info_dict, final=True):
+
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+                except (ValueError, OverflowError, OSError):
+                    pass
+
+        # Auto generate title fields corresponding to the *_number fields when missing
+        # in order to always have clean titles. This is very common for TV series.
+        if final:
+            for field in ('chapter', 'season', 'episode'):
+                if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+                    info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'
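
_fill_common_fields() centralises two bits of post-processing: deriving upload_date/release_date from the corresponding timestamps, and synthesising e.g. episode from episode_number. A standalone sketch of the same logic with illustrative values (the timestamp/date pair is taken from the SBS test below):

import datetime

info_dict = {'timestamp': 1408613220, 'episode_number': 3}

for ts_key, date_key in (('timestamp', 'upload_date'),
                         ('release_timestamp', 'release_date')):
    if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
        try:
            # utcfromtimestamp() can raise for out-of-range values
            # (e.g. negative timestamps on Windows)
            dt = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
            info_dict[date_key] = dt.strftime('%Y%m%d')
        except (ValueError, OverflowError, OSError):
            pass

for field in ('chapter', 'season', 'episode'):
    if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
        info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

print(info_dict['upload_date'])  # '20140821'
print(info_dict['episode'])      # 'Episode 3'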
@@ -1660,24 +1686,7 @@ class YoutubeDL(object):
         if 'display_id' not in info_dict and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']
 
-        for ts_key, date_key in (
-                ('timestamp', 'upload_date'),
-                ('release_timestamp', 'release_date'),
-        ):
-            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
-                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-                # see http://bugs.python.org/issue1646728)
-                try:
-                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
-                    info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
-                except (ValueError, OverflowError, OSError):
-                    pass
-
-        # Auto generate title fields corresponding to the *_number fields when missing
-        # in order to always have clean titles. This is very common for TV series.
-        for field in ('chapter', 'season', 'episode'):
-            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
-                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+        self._fill_common_fields(info_dict)
 
         for cc_kind in ('subtitles', 'automatic_captions'):
             cc = info_dict.get(cc_kind)

View File: youtube_dl/extractor/extractors.py

@@ -898,21 +898,13 @@ from .ooyala import (
 )
 from .ora import OraTVIE
 from .orf import (
-    ORFTVthekIE,
-    ORFFM4IE,
+    ORFONIE,
+    ORFONLiveIE,
     ORFFM4StoryIE,
-    ORFOE1IE,
-    ORFOE3IE,
-    ORFNOEIE,
-    ORFWIEIE,
-    ORFBGLIE,
-    ORFOOEIE,
-    ORFSTMIE,
-    ORFKTNIE,
-    ORFSBGIE,
-    ORFTIRIE,
-    ORFVBGIE,
     ORFIPTVIE,
+    ORFPodcastIE,
+    ORFRadioIE,
+    ORFRadioCollectionIE,
 )
 from .outsidetv import OutsideTVIE
 from .packtpub import (

View File: youtube_dl/extractor/mixcloud.py

@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import itertools
@@ -10,7 +11,7 @@ from ..compat import (
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_zip
+    compat_zip as zip,
 )
 from ..utils import (
     int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
     def _call_api(self, object_type, object_fields, display_id, username, slug=None):
         lookup_key = object_type + 'Lookup'
         return self._download_json(
-            'https://www.mixcloud.com/graphql', display_id, query={
+            'https://app.mixcloud.com/graphql', display_id, query={
                 'query': '''{
   %s(lookup: {username: "%s"%s}) {
     %s
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
             'ext': 'm4a',
             'title': 'Cryptkeeper',
             'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
-            'uploader': 'Daniel Holbach',
+            'uploader': 'dholbach',  # was: 'Daniel Holbach',
             'uploader_id': 'dholbach',
             'thumbnail': r're:https?://.*\.jpg',
             'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
             'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
             'ext': 'mp3',
             'title': 'Caribou 7 inch Vinyl Mix & Chat',
-            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
+            'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
             'uploader': 'Gilles Peterson Worldwide',
             'uploader_id': 'gillespeterson',
             'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
             'timestamp': 1422987057,
             'upload_date': '20150203',
         },
+        'params': {
+            'skip_download': '404 not found',
+        },
+    }, {
+        'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
+        'info_dict': {
+            'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
+            'ext': 'm4a',
+            'title': 'Carnival Música Popular Brasileira Mix',
+            'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
+            'timestamp': 1454347174,
+            'upload_date': '20160201',
+            'uploader': 'Gilles Peterson Worldwide',
+            'uploader_id': 'gillespeterson',
+            'thumbnail': 're:https?://.*',
+            'view_count': int,
+        },
     }, {
         'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
         'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
             compat_chr(compat_ord(ch) ^ compat_ord(k))
-            for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
+            for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
-        username, slug = re.match(self._VALID_URL, url).groups()
+        username, slug = self._match_valid_url(url).groups()
         username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
         track_id = '%s_%s' % (username, slug)
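
The cipher helper relies on XOR being its own inverse, which is why the docstring says both directions are possible: applying the same keystream twice returns the original text. A standalone demonstration (the key and URL shown are illustrative, not the real site values):

import itertools


def xor_cipher(text, key):
    # XOR each character with a repeating keystream
    return ''.join(
        chr(ord(ch) ^ ord(k))
        for ch, k in zip(text, itertools.cycle(key)))


plain = 'https://example.invalid/stream.m4a'  # illustrative URL only
key = 'NOTTHEREALKEY'                         # not the real site key
assert xor_cipher(xor_cipher(plain, key), key) == plain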

youtube_dl/extractor/orf.py: file diff suppressed because it is too large

View File: youtube_dl/extractor/sbs.py

@@ -2,32 +2,62 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_kwargs,
+    compat_str,
+)
 from ..utils import (
-    smuggle_url,
     ExtractorError,
+    HEADRequest,
+    float_or_none,
+    int_or_none,
+    merge_dicts,
+    parse_duration,
+    parse_iso8601,
+    traverse_obj,
+    update_url_query,
+    url_or_none,
 )
 
 
 class SBSIE(InfoExtractor):
     IE_DESC = 'sbs.com.au'
-    _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=|/watch/)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
+    _VALID_URL = r'''(?x)
+        https?://(?:www\.)?sbs\.com\.au/(?:
+            ondemand(?:
+                /video/(?:single/)?|
+                /(?:movie|tv-program)/[^/]+/|
+                /(?:tv|news)-series/(?:[^/]+/){3}|
+                .*?\bplay=|/watch/
+            )|news/(?:embeds/)?video/
+        )(?P<id>[0-9]+)'''
+    _EMBED_REGEX = [r'''(?x)
+        (?:
+            <meta\s+property="og:video"\s+content=|
+            <iframe[^>]+?src=
+        )
+        (["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''']
 
     _TESTS = [{
-        # Original URL is handled by the generic IE which finds the iframe:
-        # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation
+        # Exceptional unrestricted show for testing, thanks SBS,
+        # from an iframe of this page, handled by the generic IE, now 404:
+        # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation, but replaced by
+        # https://www.sbs.com.au/programs/video/320403011771/Dingo-Conservation-The-Feed
         'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed',
-        'md5': '3150cf278965eeabb5b4cea1c963fe0a',
+        'md5': 'e49d0290cb4f40d893b8dfe760dce6b0',
         'info_dict': {
-            'id': '_rFBPRPO4pMR',
+            'id': '320403011771',  # formerly '_rFBPRPO4pMR', no longer found
             'ext': 'mp4',
             'title': 'Dingo Conservation (The Feed)',
             'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'duration': 308,
             'timestamp': 1408613220,
             'upload_date': '20140821',
             'uploader': 'SBSC',
+            'tags': None,
+            'categories': None,
         },
+        'expected_warnings': ['Unable to download JSON metadata'],
     }, {
         'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
         'only_matching': True,
@@ -46,33 +76,160 @@ class SBSIE(InfoExtractor):
     }, {
         'url': 'https://www.sbs.com.au/ondemand/watch/1698704451971',
         'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/movie/coherence/1469404227931',
+        'only_matching': True,
+    }, {
+        'note': 'Live stream',
+        'url': 'https://www.sbs.com.au/ondemand/video/1726824003663/sbs-24x7-live-stream-nsw',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/news-series/dateline/dateline-2022/dateline-s2022-ep26/2072245827515',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/tv-series/the-handmaids-tale/season-5/the-handmaids-tale-s5-ep1/2065631811776',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/tv-program/autun-romes-forgotten-sister/2116212803602',
+        'only_matching': True,
     }]
 
+    def _extract_m3u8_formats(self, m3u8_url, video_id, *args, **kwargs):
+        # ext, entry_protocol, preference, m3u8_id, note, errnote, fatal,
+        # live, data, headers, query
+        entry_protocol = args[1] if len(args) > 1 else kwargs.get('entry_protocol')
+        if not entry_protocol:
+            entry_protocol = 'm3u8_native'
+            if len(args) > 1:
+                args = list(args)
+                args[1] = entry_protocol
+            else:
+                kwargs['entry_protocol'] = entry_protocol
+                kwargs = compat_kwargs(kwargs)
+        return super(SBSIE, self)._extract_m3u8_formats(m3u8_url, video_id, *args, **kwargs)
+
+    _GEO_COUNTRIES = ['AU']
+    # naming for exportability
+    AUS_TV_PARENTAL_GUIDELINES = {
+        'P': 0,
+        'C': 7,
+        'G': 0,
+        'PG': 0,
+        'M': 14,
+        'MA15+': 15,
+        'AV15+': 15,
+        'MAV15+': 15,
+        'R18+': 18,
+        'NC': 0,  # not classified (unofficial, used by SBS)
+    }
+    _PLAYER_API = 'https://www.sbs.com.au/api/v3'
+    _CATALOGUE_API = 'https://catalogue.pr.sbsod.com/'
+    _VOD_BASE_URL = 'https://sbs-vod-prod-01.akamaized.net/'
+
+    def _call_api(self, video_id, path, query=None, data=None, headers=None, fatal=True):
+        return self._download_json(update_url_query(
+            self._CATALOGUE_API + path, query),
+            video_id, headers=headers or {}, fatal=fatal) or {}
+
+    def _get_smil_url(self, video_id):
+        return update_url_query(
+            self._PLAYER_API + '/video_smil', {'id': video_id})
+
+    def _get_player_data(self, video_id, headers=None, fatal=False):
+        return self._download_json(update_url_query(
+            self._PLAYER_API + '/video_stream', {'id': video_id, 'context': 'tv'}),
+            video_id, headers=headers or {}, fatal=fatal) or {}
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        player_params = self._download_json(
-            'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id)
+        # get media links directly though later metadata may contain contentUrl
+        smil_url = self._get_smil_url(video_id)
+        formats = self._extract_smil_formats(smil_url, video_id, fatal=False) or []
 
-        error = player_params.get('error')
-        if error:
-            error_message = 'Sorry, The video you are looking for does not exist.'
-            video_data = error.get('results') or {}
-            error_code = error.get('errorCode')
-            if error_code == 'ComingSoon':
-                error_message = '%s is not yet available.' % video_data.get('title', '')
-            elif error_code in ('Forbidden', 'intranetAccessOnly'):
-                error_message = 'Sorry, This video cannot be accessed via this website'
-            elif error_code == 'Expired':
-                error_message = 'Sorry, %s is no longer available.' % video_data.get('title', '')
-            raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
+        if not formats:
+            urlh = self._request_webpage(
+                HEADRequest(self._VOD_BASE_URL), video_id,
+                note='Checking geo-restriction', fatal=False, expected_status=403)
+            if urlh:
+                error_reasons = urlh.headers.get_all('x-error-reason') or []
+                if 'geo-blocked' in error_reasons:
+                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
 
-        urls = player_params['releaseUrls']
-        theplatform_url = (urls.get('progressive') or urls.get('html')
-                           or urls.get('standard') or player_params['relatedItemsURL'])
+        self._sort_formats(formats)
 
-        return {
-            '_type': 'url_transparent',
-            'ie_key': 'ThePlatform',
+        # try for metadata from the same source
+        player_data = self._get_player_data(video_id, fatal=False)
+        media = traverse_obj(player_data, 'video_object', expected_type=dict) or {}
+
+        # get, or add, metadata from catalogue
+        media.update(self._call_api(video_id, 'mpx-media/' + video_id, fatal=not media))
+
+        # utils candidate for use with traverse_obj()
+        def txt_or_none(s):
+            return (s.strip() or None) if isinstance(s, compat_str) else None
+
+        # expected_type fn for thumbs
+        def xlate_thumb(t):
+            u = url_or_none(t.get('contentUrl'))
+            return u and {
+                'id': t.get('name'),
+                'url': u,
+                'width': int_or_none(t.get('width')),
+                'height': int_or_none(t.get('height')),
+            }
+
+        # may be numeric or timecoded
+        def really_parse_duration(d):
+            result = float_or_none(d)
+            if result is None:
+                result = parse_duration(d)
+            return result
+
+        def traverse_media(*args, **kwargs):
+            nkwargs = None
+            if 'expected_type' not in kwargs:
+                kwargs['expected_type'] = txt_or_none
+                nkwargs = kwargs
+            if 'get_all' not in kwargs:
+                kwargs['get_all'] = False
+                nkwargs = kwargs
+            if nkwargs:
+                kwargs = compat_kwargs(nkwargs)
+            return traverse_obj(media, *args, **kwargs)
+
+        # For named episodes, use the catalogue's title to set episode, rather than generic 'Episode N'.
+        if traverse_media('partOfSeries', expected_type=dict):
+            media['epName'] = traverse_media('title')
+
+        return merge_dicts(*reversed(({
             'id': video_id,
-            'url': smuggle_url(self._proto_relative_url(theplatform_url), {'force_smil_url': True}),
-        }
+        }, dict((k, traverse_media(v)) for k, v in {
+            'title': 'name',
+            'description': 'description',
+            'channel': ('taxonomy', 'channel', 'name'),
+            'series': ((('partOfSeries', 'name'), 'seriesTitle')),
+            'series_id': ((('partOfSeries', 'uuid'), 'seriesID')),
+            'episode': 'epName',
+        }.items()), {
+            'season_number': traverse_media((('partOfSeries', None), 'seasonNumber'), expected_type=int_or_none),
+            'episode_number': traverse_media('episodeNumber', expected_type=int_or_none),
+            'timestamp': traverse_media('datePublished', ('publication', 'startDate'),
+                                        expected_type=parse_iso8601),
+            'release_year': traverse_media('releaseYear', expected_type=int_or_none),
+            'duration': traverse_media('duration', expected_type=really_parse_duration),
+            'is_live': traverse_media('liveStream', expected_type=bool),
+            'age_limit': self.AUS_TV_PARENTAL_GUIDELINES.get(traverse_media(
+                'classificationID', 'contentRating', default='').upper()),
+            'categories': traverse_media(
+                ('genres', Ellipsis), ('taxonomy', ('genre', 'subgenre'), 'name'),
+                get_all=True) or None,
+            'tags': traverse_media(
+                (('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice')), Ellipsis),
+                get_all=True) or None,
+            'thumbnails': traverse_media(('thumbnails', Ellipsis),
+                                         expected_type=xlate_thumb, get_all=True),
+            'formats': formats,
+            # TODO: _extract_smil_formats_and_subtitles()
+            # 'subtitles': subtitles,
+            'uploader': 'SBSC',
+        })))
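
Most of the new metadata extraction funnels through traverse_media(), which just fills in default expected_type/get_all kwargs before delegating to traverse_obj(), and the classification code is looked up in AUS_TV_PARENTAL_GUIDELINES to produce an age_limit. A toy sketch of both ideas; the traverse_obj below is a much-simplified stand-in for the real utility (which also supports Ellipsis branching, defaults and type coercion):

AUS_TV_PARENTAL_GUIDELINES = {'PG': 0, 'M': 14, 'MA15+': 15, 'R18+': 18}


def traverse_obj(obj, *paths, default=None):
    # toy: return the first path that resolves to a non-None value
    for path in paths:
        cur = obj
        for key in (path if isinstance(path, tuple) else (path,)):
            cur = cur.get(key) if isinstance(cur, dict) else None
            if cur is None:
                break
        if cur is not None:
            return cur
    return default


media = {'classificationID': 'm', 'partOfSeries': {'name': 'Example'}}
rating = traverse_obj(media, 'classificationID', 'contentRating', default='')
print(AUS_TV_PARENTAL_GUIDELINES.get(rating.upper()))  # 14
print(traverse_obj(media, ('partOfSeries', 'name')))   # Example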

View File: youtube_dl/extractor/vidlii.py

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     float_or_none,
     get_element_by_id,
@@ -11,6 +12,7 @@ from ..utils import (
     strip_or_none,
     unified_strdate,
     urljoin,
+    str_to_int,
 )
@@ -35,6 +37,26 @@ class VidLiiIE(InfoExtractor):
             'categories': ['News & Politics'],
             'tags': ['Vidlii', 'Jan', 'Videogames'],
         }
     }, {
+        # HD
+        'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl',
+        'md5': '450e7da379c884788c3a4fa02a3ce1a4',
+        'info_dict': {
+            'id': '2Ng8Abj2Fkl',
+            'ext': 'mp4',
+            'title': 'test',
+            'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9',
+            'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg',
+            'uploader': 'VidLii',
+            'uploader_url': 'https://www.vidlii.com/user/VidLii',
+            'upload_date': '20200927',
+            'duration': 5,
+            'view_count': int,
+            'comment_count': int,
+            'average_rating': float,
+            'categories': ['Film & Animation'],
+            'tags': list,
+        },
+    }, {
         'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
         'only_matching': True,
@@ -46,11 +68,32 @@ class VidLiiIE(InfoExtractor):
         webpage = self._download_webpage(
             'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
 
-        video_url = self._search_regex(
-            r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
-            'video url', group='url')
+        formats = []
 
-        title = self._search_regex(
+        def add_format(format_url, height=None):
+            height = int(self._search_regex(r'(\d+)\.mp4',
+                                            format_url, 'height', default=360))
+            formats.append({
+                'url': format_url,
+                'format_id': '%dp' % height if height else None,
+                'height': height,
+            })
+
+        sources = re.findall(
+            r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
+            webpage)
+
+        formats = []
+        if len(sources) > 1:
+            add_format(sources[1][1])
+            self._check_formats(formats, video_id)
+        if len(sources) > 0:
+            add_format(sources[0][1])
+
+        self._sort_formats(formats)
+
+        title = self._html_search_regex(
             (r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
             'title')
@@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor):
             default=None) or self._search_regex(
             r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
 
-        view_count = int_or_none(self._search_regex(
-            (r'<strong>(\d+)</strong> views',
-             r'Views\s*:\s*<strong>(\d+)</strong>'),
+        view_count = str_to_int(self._html_search_regex(
+            (r'<strong>([\d,.]+)</strong> views',
+             r'Views\s*:\s*<strong>([\d,.]+)</strong>'),
             webpage, 'view count', fatal=False))
 
         comment_count = int_or_none(self._search_regex(
@@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor):
         return {
             'id': video_id,
-            'url': video_url,
+            'formats': formats,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
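
The switch from int_or_none(self._search_regex(...)) to str_to_int(self._html_search_regex(...)) is what fixes the view-count bug mentioned in the commit message: VidLii renders counts with digit grouping, which int() rejects. A simplified stand-in for youtube_dl.utils.str_to_int (the real helper is more careful about its input):

import re


def str_to_int(v):
    # strip digit-grouping separators before converting
    if v is None:
        return None
    return int(re.sub(r'[,.\s]', '', v))


m = re.search(r'<strong>([\d,.]+)</strong> views',
              '<strong>1,234</strong> views')
print(str_to_int(m.group(1)))  # 1234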