# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_unquote,
)
from ..utils import (
ExtractorError,
int_or_none,
JSON_LD_RE,
NO_DEFAULT,
parse_age_limit,
parse_duration,
try_get,
)
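
# youtube-dl extractors for NRK, the Norwegian public broadcaster (this module
# is youtube_dl/extractor/nrk.py): NRKIE and NRKTVIE fetch mediaelements from
# the PS API (tv.nrk.no / radio.nrk.no), while the remaining classes build
# playlists for seasons, series, episode lists, the "direkte" live channels,
# nrk.no articles and NRK Skole.
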
class NRKBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['NO']
_api_host = None
def _real_extract(self, url):
video_id = self._match_id(url)
api_hosts = (self._api_host, ) if self._api_host else self._API_HOSTS
for api_host in api_hosts:
data = self._download_json(
'http://%s/mediaelement/%s' % (api_host, video_id),
video_id, 'Downloading mediaelement JSON',
fatal=api_host == api_hosts[-1])
if not data:
continue
self._api_host = api_host
break
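
        # The mediaelement payload is a JSON object; the fields read below
        # include 'title'/'fullTitle'/'mainTitle', 'id', 'mediaAssets' (one
        # entry per stream or part), 'mediaUrl' (single-stream fallback),
        # 'messageType' (rights/geo errors), series/episode metadata, 'images'
        # and 'shortIndexPoints' (chapter markers).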
title = data.get('fullTitle') or data.get('mainTitle') or data['title']
video_id = data.get('id') or video_id
entries = []
conviva = data.get('convivaStatistics') or {}
live = (data.get('mediaElementType') == 'Live'
or data.get('isLive') is True or conviva.get('isLive'))
def make_title(t):
return self._live_title(t) if live else t
media_assets = data.get('mediaAssets')
if media_assets and isinstance(media_assets, list):
def video_id_and_title(idx):
return ((video_id, title) if len(media_assets) == 1
else ('%s-%d' % (video_id, idx), '%s (Part %d)' % (title, idx)))
for num, asset in enumerate(media_assets, 1):
asset_url = asset.get('url')
if not asset_url:
continue
formats = self._extract_akamai_formats(asset_url, video_id)
if not formats:
continue
self._sort_formats(formats)
# Some f4m streams may not work with hdcore in fragments' URLs
for f in formats:
extra_param = f.get('extra_param_to_segment_url')
if extra_param and 'hdcore' in extra_param:
del f['extra_param_to_segment_url']
entry_id, entry_title = video_id_and_title(num)
duration = parse_duration(asset.get('duration'))
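                # Norwegian subtitles are exposed as 'webVttSubtitlesUrl'
                # and/or 'timedTextSubtitlesUrl' on the asset; both are
                # registered under the language code 'no'.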
subtitles = {}
for subtitle in ('webVtt', 'timedText'):
subtitle_url = asset.get('%sSubtitlesUrl' % subtitle)
if subtitle_url:
subtitles.setdefault('no', []).append({
'url': compat_urllib_parse_unquote(subtitle_url)
})
entries.append({
'id': asset.get('carrierId') or entry_id,
'title': make_title(entry_title),
'duration': duration,
'subtitles': subtitles,
'formats': formats,
})
if not entries:
media_url = data.get('mediaUrl')
if media_url:
formats = self._extract_akamai_formats(media_url, video_id)
self._sort_formats(formats)
duration = parse_duration(data.get('duration'))
entries = [{
'id': video_id,
'title': make_title(title),
'duration': duration,
'formats': formats,
}]
if not entries:
MESSAGES = {
'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
'ProgramRightsHasExpired': 'Programmet har gått ut',
'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
}
message_type = data.get('messageType', '')
# Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
if 'IsGeoBlocked' in message_type:
self.raise_geo_restricted(
msg=MESSAGES.get('ProgramIsGeoBlocked'),
countries=self._GEO_COUNTRIES)
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, MESSAGES.get(
message_type, message_type)),
expected=True)
series = conviva.get('seriesName') or data.get('seriesTitle')
episode = conviva.get('episodeName') or data.get('episodeNumberOrDate')
season_number = None
episode_number = None
if data.get('mediaElementType') == 'Episode':
_season_episode = data.get('scoresStatistics', {}).get('springStreamStream') or \
data.get('relativeOriginUrl', '')
EPISODENUM_RE = [
r'/s(?P<season>\d{,2})e(?P<episode>\d{,2})\.',
r'/sesong-(?P<season>\d{,2})/episode-(?P<episode>\d{,2})',
]
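                # Illustrative matches: a relative origin URL containing
                # '/sesong-3/episode-13' yields season 3, episode 13 (cf. the
                # Anno test in NRKTVIE below), while the first pattern covers
                # '/s03e13.'-style stream paths.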
season_number = int_or_none(self._search_regex(
EPISODENUM_RE, _season_episode, 'season number',
default=None, group='season'))
episode_number = int_or_none(self._search_regex(
EPISODENUM_RE, _season_episode, 'episode number',
default=None, group='episode'))
thumbnails = None
images = data.get('images')
if images and isinstance(images, dict):
web_images = images.get('webImages')
if isinstance(web_images, list):
thumbnails = [{
'url': image['imageUrl'],
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in web_images if image.get('imageUrl')]
description = data.get('description')
category = data.get('mediaAnalytics', {}).get('category')
common_info = {
'description': description,
'series': series,
'episode': episode,
'season_number': season_number,
'episode_number': episode_number,
'categories': [category] if category else None,
'age_limit': parse_age_limit(data.get('legalAge')),
'thumbnails': thumbnails,
}
vcodec = 'none' if data.get('mediaType') == 'Audio' else None
for entry in entries:
entry.update(common_info)
for f in entry['formats']:
f['vcodec'] = vcodec
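
        # 'shortIndexPoints' lists index points within the programme; each
        # point's start time together with the next point's start time (or the
        # total duration for the last point) yields one chapter.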
points = data.get('shortIndexPoints')
if isinstance(points, list):
chapters = []
for next_num, point in enumerate(points, start=1):
if not isinstance(point, dict):
continue
start_time = parse_duration(point.get('startPoint'))
if start_time is None:
continue
end_time = parse_duration(
data.get('duration')
if next_num == len(points)
else points[next_num].get('startPoint'))
if end_time is None:
continue
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': point.get('title'),
})
if chapters and len(entries) == 1:
entries[0]['chapters'] = chapters
return self.playlist_result(entries, video_id, title, description)
class NRKIE(NRKBaseIE):
_VALID_URL = r'''(?x)
(?:
nrk:|
https?://
(?:
(?:www\.)?nrk\.no/video/PS\*|
v8[-.]psapi\.nrk\.no/mediaelement/
)
)
(?P<id>[^?#&]+)
'''
_API_HOSTS = ('psapi.nrk.no', 'v8-psapi.nrk.no')
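    # Hosts are tried in order by NRKBaseIE._real_extract; the first host that
    # returns a usable mediaelement is cached in _api_host.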
_TESTS = [{
# video
'url': 'http://www.nrk.no/video/PS*150533',
'md5': '706f34cdf1322577589e369e522b50ef',
'info_dict': {
'id': '150533',
'ext': 'mp4',
'title': 'Dompap og andre fugler i Piip-Show',
'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
'duration': 262,
}
}, {
# audio
'url': 'http://www.nrk.no/video/PS*154915',
# MD5 is unstable
'info_dict': {
'id': '154915',
'ext': 'flv',
'title': 'Slik høres internett ut når du er blind',
'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
'duration': 20,
}
}, {
'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
'only_matching': True,
}, {
'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
'only_matching': True,
}, {
'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
'only_matching': True,
}]
class NRKTVIE(NRKBaseIE):
IE_DESC = 'NRK TV and NRK Radio'
_EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
_VALID_URL = r'''(?x)
https?://
(?:tv|radio)\.nrk(?:super)?\.no/
(?:serie(?:/[^/]+){1,2}|program)/
(?![Ee]pisodes)%s
(?:/\d{2}-\d{2}-\d{4})?
(?:\#del=(?P<part_id>\d+))?
''' % _EPISODE_RE
_API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
_TESTS = [{
'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
'md5': '9a167e54d04671eb6317a37b7bc8a280',
'info_dict': {
'id': 'MUHH48000314AA',
'ext': 'mp4',
'title': '20 spørsmål 23.05.2014',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'duration': 1741,
'series': '20 spørsmål',
'episode': '23.05.2014',
},
}, {
'url': 'https://tv.nrk.no/program/mdfp15000514',
'info_dict': {
'id': 'MDFP15000514CA',
'ext': 'mp4',
'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
'duration': 4605,
'series': 'Kunnskapskanalen',
'episode': '24.05.2014',
},
'params': {
'skip_download': True,
},
}, {
# single playlist video
'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
'info_dict': {
'id': 'MSPO40010515-part2',
'ext': 'flv',
'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Video is geo restricted'],
'skip': 'particular part is not supported currently',
}, {
'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
'playlist': [{
'info_dict': {
'id': 'MSPO40010515AH',
'ext': 'mp4',
'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
'duration': 772,
'series': 'Tour de Ski',
'episode': '06.01.2015',
},
'params': {
'skip_download': True,
},
}, {
'info_dict': {
'id': 'MSPO40010515BH',
'ext': 'mp4',
'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
'duration': 6175,
'series': 'Tour de Ski',
'episode': '06.01.2015',
},
'params': {
'skip_download': True,
},
}],
'info_dict': {
'id': 'MSPO40010515',
'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
},
'expected_warnings': ['Video is geo restricted'],
}, {
'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
'info_dict': {
'id': 'KMTE50001317AA',
'ext': 'mp4',
'title': 'Anno 13:30',
'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
'duration': 2340,
'series': 'Anno',
'episode': '13:30',
'season_number': 3,
'episode_number': 13,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
'info_dict': {
'id': 'MUHH46000317AA',
'ext': 'mp4',
'title': 'Nytt på Nytt 27.01.2017',
'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
'duration': 1796,
'series': 'Nytt på nytt',
'episode': '27.01.2017',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
'only_matching': True,
}, {
'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
'only_matching': True,
}]
class NRKTVEpisodeIE(InfoExtractor):
_VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/\d+/episode/\d+)'
_TEST = {
'url': 'https://tv.nrk.no/serie/backstage/sesong/1/episode/8',
'info_dict': {
'id': 'MSUI14000816AA',
'ext': 'mp4',
'title': 'Backstage 8:30',
'description': 'md5:de6ca5d5a2d56849e4021f2bf2850df4',
'duration': 1320,
'series': 'Backstage',
'season_number': 1,
'episode_number': 8,
'episode': '8:30',
},
'params': {
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
nrk_id = self._parse_json(
self._search_regex(JSON_LD_RE, webpage, 'JSON-LD', group='json_ld'),
display_id)['@id']
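        # The episode page embeds schema.org JSON-LD whose '@id' is the
        # internal programme ID (e.g. MSUI14000816AA in the test above);
        # extraction is then delegated to NRKIE via the nrk: scheme.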
assert re.match(NRKTVIE._EPISODE_RE, nrk_id)
return self.url_result(
'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
class NRKTVSerieBaseIE(InfoExtractor):
def _extract_series(self, webpage, display_id, fatal=True):
config = self._parse_json(
self._search_regex(
(r'INITIAL_DATA(?:_V\d)?_*\s*=\s*({.+?})\s*;',
r'({.+?})\s*,\s*"[^"]+"\s*\)\s*</script>'),
webpage, 'config', default='{}' if not fatal else NO_DEFAULT),
display_id, fatal=False)
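        # 'config' here is the page state, embedded either as an
        # 'INITIAL_DATA*' assignment or as an object passed to a function call
        # at the end of a <script> tag; the series metadata sits under
        # 'initialState.series' or directly under 'series'.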
if not config:
return
return try_get(
config,
(lambda x: x['initialState']['series'], lambda x: x['series']),
dict)
def _extract_seasons(self, seasons):
if not isinstance(seasons, list):
return []
entries = []
for season in seasons:
entries.extend(self._extract_episodes(season))
return entries
def _extract_episodes(self, season):
if not isinstance(season, dict):
return []
return self._extract_entries(season.get('episodes'))
def _extract_entries(self, entry_list):
if not isinstance(entry_list, list):
return []
entries = []
for episode in entry_list:
nrk_id = episode.get('prfId')
if not nrk_id or not isinstance(nrk_id, compat_str):
continue
entries.append(self.url_result(
'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id))
return entries
class NRKTVSeasonIE(NRKTVSerieBaseIE):
_VALID_URL = r'https?://tv\.nrk\.no/serie/[^/]+/sesong/(?P<id>\d+)'
_TEST = {
'url': 'https://tv.nrk.no/serie/backstage/sesong/1',
'info_dict': {
'id': '1',
'title': 'Sesong 1',
},
'playlist_mincount': 30,
}
@classmethod
def suitable(cls, url):
return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url)
else super(NRKTVSeasonIE, cls).suitable(url))
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
series = self._extract_series(webpage, display_id)
season = next(
s for s in series['seasons']
if int(display_id) == s.get('seasonNumber'))
title = try_get(season, lambda x: x['titles']['title'], compat_str)
return self.playlist_result(
self._extract_episodes(season), display_id, title)
class NRKTVSeriesIE(NRKTVSerieBaseIE):
_VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
_ITEM_RE = r'(?:data-season=["\']|id=["\']season-)(?P<id>\d+)'
_TESTS = [{
# new layout, seasons
'url': 'https://tv.nrk.no/serie/backstage',
'info_dict': {
'id': 'backstage',
'title': 'Backstage',
'description': 'md5:c3ec3a35736fca0f9e1207b5511143d3',
},
'playlist_mincount': 60,
}, {
# new layout, instalments
'url': 'https://tv.nrk.no/serie/groenn-glede',
'info_dict': {
'id': 'groenn-glede',
'title': 'Grønn glede',
'description': 'md5:7576e92ae7f65da6993cf90ee29e4608',
},
'playlist_mincount': 10,
}, {
# old layout
'url': 'https://tv.nrksuper.no/serie/labyrint',
'info_dict': {
'id': 'labyrint',
'title': 'Labyrint',
'description': 'md5:318b597330fdac5959247c9b69fdb1ec',
},
'playlist_mincount': 3,
}, {
'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene',
'only_matching': True,
}, {
'url': 'https://tv.nrk.no/serie/saving-the-human-race',
'only_matching': True,
}, {
'url': 'https://tv.nrk.no/serie/postmann-pat',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (
False if any(ie.suitable(url)
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKTVSeasonIE))
else super(NRKTVSeriesIE, cls).suitable(url))
def _real_extract(self, url):
series_id = self._match_id(url)
webpage = self._download_webpage(url, series_id)
# New layout (e.g. https://tv.nrk.no/serie/backstage)
series = self._extract_series(webpage, series_id, fatal=False)
if series:
title = try_get(series, lambda x: x['titles']['title'], compat_str)
description = try_get(
series, lambda x: x['titles']['subtitle'], compat_str)
entries = []
entries.extend(self._extract_seasons(series.get('seasons')))
entries.extend(self._extract_entries(series.get('instalments')))
entries.extend(self._extract_episodes(series.get('extraMaterial')))
return self.playlist_result(entries, series_id, title, description)
# Old layout (e.g. https://tv.nrksuper.no/serie/labyrint)
entries = [
self.url_result(
'https://tv.nrk.no/program/Episodes/{series}/{season}'.format(
series=series_id, season=season_id))
for season_id in re.findall(self._ITEM_RE, webpage)
]
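        # The generated /program/Episodes/<series>/<season> URLs are expected
        # to be handled by NRKTVEpisodesIE below.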
title = self._html_search_meta(
'seriestitle', webpage,
'title', default=None) or self._og_search_title(
webpage, fatal=False)
if title:
title = self._search_regex(
r'NRK (?:Super )?TV\s*[-]\s*(.+)', title, 'title', default=title)
description = self._html_search_meta(
'series_description', webpage,
'description', default=None) or self._og_search_description(webpage)
return self.playlist_result(entries, series_id, title, description)
class NRKTVDirekteIE(NRKTVIE):
IE_DESC = 'NRK TV Direkte and NRK Radio Direkte'
_VALID_URL = r'https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://tv.nrk.no/direkte/nrk1',
'only_matching': True,
}, {
'url': 'https://radio.nrk.no/direkte/p1_oslo_akershus',
'only_matching': True,
}]
class NRKPlaylistBaseIE(InfoExtractor):
def _extract_description(self, webpage):
pass
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('nrk:%s' % video_id, NRKIE.ie_key())
for video_id in re.findall(self._ITEM_RE, webpage)
]
        playlist_title = self._extract_title(webpage)
playlist_description = self._extract_description(webpage)
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description)
class NRKPlaylistIE(NRKPlaylistBaseIE):
_VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)'
_ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
_TESTS = [{
'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
'info_dict': {
'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
'title': 'Gjenopplev den historiske solformørkelsen',
'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
},
'playlist_count': 2,
}, {
'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
'info_dict': {
'id': 'rivertonprisen-til-karin-fossum-1.12266449',
'title': 'Rivertonprisen til Karin Fossum',
'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
},
'playlist_count': 2,
}]
def _extract_title(self, webpage):
return self._og_search_title(webpage, fatal=False)
def _extract_description(self, webpage):
return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
_VALID_URL = r'https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)'
_ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
_TESTS = [{
'url': 'https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031',
'info_dict': {
'id': '69031',
'title': 'Nytt på nytt, sesong: 201210',
},
'playlist_count': 4,
}]
def _extract_title(self, webpage):
return self._html_search_regex(
r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
class NRKSkoleIE(InfoExtractor):
IE_DESC = 'NRK Skole'
_VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.nrk.no/skole/?page=search&q=&mediaId=14099',
'md5': '6bc936b01f9dd8ed45bc58b252b2d9b6',
'info_dict': {
'id': '6021',
'ext': 'mp4',
'title': 'Genetikk og eneggede tvillinger',
'description': 'md5:3aca25dcf38ec30f0363428d2b265f8d',
'duration': 399,
},
}, {
'url': 'https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://mimir.nrk.no/plugin/1.0/static?mediaId=%s' % video_id,
video_id)
nrk_id = self._parse_json(
self._search_regex(
r'<script[^>]+type=["\']application/json["\'][^>]*>({.+?})</script>',
webpage, 'application json'),
video_id)['activeMedia']['psId']
return self.url_result('nrk:%s' % nrk_id)
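

# Illustrative usage sketch (not part of the extractors; assumes youtube-dl is
# installed and that NRK content is reachable from your location):
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'https://tv.nrk.no/serie/backstage/sesong/1/episode/8',
#             download=False)
#         print(info.get('id'), info.get('title'))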