# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
)
from ..utils import (
    clean_html,
    dict_get,
    ExtractorError,
    get_element_by_class,
    int_or_none,
    parse_iso8601,
    str_or_none,
    strip_or_none,
    try_get,
    url_or_none,
    urlencode_postdata,
    urljoin,
)


class PlatziBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://platzi.com/login/'
    _NETRC_MACHINE = 'platzi'

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'email': username,
            'password': password,
        })

        urlh = self._request_webpage(
            self._LOGIN_URL, None, 'Logging in',
            data=urlencode_postdata(login_form),
            headers={'Referer': self._LOGIN_URL})

        # login succeeded
        if 'platzi.com/login' not in urlh.geturl():
            return

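        # still on the login page: scrape the inline `login = {...}` object to
        # surface a meaningful error message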
        login_error = self._webpage_read_content(
            urlh, self._LOGIN_URL, None, 'Downloading login error page')

        login = self._parse_json(
            self._search_regex(
                r'login\s*=\s*({.+?})(?:\s*;|\s*</script)', login_error, 'login'),
            None)

        for kind in ('error', 'password', 'nonFields'):
            error = str_or_none(login.get('%sError' % kind))
            if error:
                raise ExtractorError(
                    'Unable to login: %s' % error, expected=True)
        raise ExtractorError('Unable to log in')


class PlatziIE(PlatziBaseIE):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            platzi\.com/clases|           # es version
                            courses\.platzi\.com/classes  # en version
                        )/[^/]+/(?P<id>\d+)-[^/?\#&]+
                    '''

    _TESTS = [{
        'url': 'https://platzi.com/clases/1311-next-js/12074-creando-nuestra-primera-pagina/',
        'md5': '8f56448241005b561c10f11a595b37e3',
        'info_dict': {
            'id': '12074',
            'ext': 'mp4',
            'title': 'Creando nuestra primera página',
            'description': 'md5:4c866e45034fc76412fbf6e60ae008bc',
            'duration': 420,
        },
        'skip': 'Requires platzi account credentials',
    }, {
        'url': 'https://courses.platzi.com/classes/1367-communication-codestream/13430-background/',
        'info_dict': {
            'id': '13430',
            'ext': 'mp4',
            'title': 'Background',
            'description': 'md5:49c83c09404b15e6e71defaf87f6b305',
            'duration': 360,
        },
        'skip': 'Requires platzi account credentials',
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        lecture_id = self._match_id(url)

        headers = {'User-Agent': 'Mozilla/5.0'}
        webpage = self._download_webpage(url, lecture_id, headers=headers)
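        # the lecture page ships its data as a JSON blob assigned to
        # window.__PRELOADED_STATE__ inside a <script> tag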
        data_preloaded_state = self._parse_json(
            self._search_regex(
                (r'window\s*.\s*__PRELOADED_STATE__\s*=\s*({.*?});?\s*</script'), webpage, 'client data'),
            lecture_id)

        video_player = try_get(data_preloaded_state, lambda x: x['videoPlayer'], dict)
        title = strip_or_none(video_player.get('name')) or self._og_search_title(webpage)
        servers = try_get(video_player, lambda x: x['video']['servers'], dict) or {}
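        # an empty server list usually means playback is blocked; blockedInfo
        # carries the reason (e.g. 'unlogged' for anonymous users)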
        if not servers and try_get(video_player, lambda x: x['blockedInfo']['blocked']):
            why = video_player['blockedInfo'].get('type') or 'unspecified'
            if why == 'unlogged':
                self.raise_login_required()
            raise ExtractorError(
                'All video formats blocked because ' + why, expected=True)

        formats = []
        headers['Referer'] = url
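        # map each delivery type offered by a server to a callback that appends
        # its formats; the lambdas close over server_json, which is rebound on
        # every iteration of the loop below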
        extractions = {
            'hls': lambda x: formats.extend(self._extract_m3u8_formats(
                server_json[x], lecture_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls',
                note='Downloading %s m3u8 information' % (server_json.get('id', x), ),
                headers=headers, fatal=False)),
            'dash': lambda x: formats.extend(self._extract_mpd_formats(
                server_json[x], lecture_id, mpd_id='dash',
                note='Downloading %s MPD manifest' % (server_json.get('id', x), ),
                headers=headers, fatal=False)),
        }
        for server, server_json in servers.items():
            if not isinstance(server_json, dict):
                continue
            for fmt in server_json.keys():
                extraction = extractions.get(fmt)
                if callable(extraction):
                    extraction(fmt)
        self._sort_formats(formats)
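        # the manifests were fetched with a Referer; attach the same header to
        # every format so the media segments are requested with it as well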
        for f in formats:
            f.setdefault('http_headers', {})['Referer'] = headers['Referer']

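        # Platzi exposes a single course category; wrap it in a list to fit the
        # standard 'categories' field (None when absent)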
        def categories():
            cat = strip_or_none(video_player.get('courseCategory'))
            if cat:
                return [cat]

        return {
            'id': lecture_id,
            'title': title,
            'description': clean_html(video_player.get('courseDescription')) or self._og_search_description(webpage),
            'duration': int_or_none(video_player.get('duration'), invscale=60),
            'thumbnail': url_or_none(video_player.get('thumbnail')) or self._og_search_thumbnail(webpage),
            'timestamp': parse_iso8601(dict_get(video_player, ('dataModified', 'dataPublished'))),
            'creator': strip_or_none(video_player.get('teacherName')) or clean_html(get_element_by_class('TeacherDetails-name', webpage)),
            'comment_count': int_or_none(video_player.get('commentsNumber')),
            'categories': categories(),
            'series': strip_or_none(video_player.get('courseTitle')) or None,
            'formats': formats,
        }


class PlatziCourseIE(PlatziBaseIE):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            platzi\.com/clases|           # es version
                            courses\.platzi\.com/classes  # en version
                        )/(?P<id>[^/?\#&]+)
                    '''
    _TESTS = [{
        'url': 'https://platzi.com/clases/next-js/',
        'info_dict': {
            'id': '1311',
            'title': 'Curso de Next.js',
        },
        'playlist_count': 22,
    }, {
        'url': 'https://courses.platzi.com/classes/communication-codestream/',
        'info_dict': {
            'id': '1367',
            'title': 'Codestream Course',
        },
        'playlist_count': 14,
    }]

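    # individual lecture URLs (which carry a numeric id) match PlatziIE, so
    # only bare course slugs are handled by this playlist extractor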
    @classmethod
    def suitable(cls, url):
        return False if PlatziIE.suitable(url) else super(PlatziCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        course_name = self._match_id(url)

        webpage = self._download_webpage(url, course_name)

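        # course pages embed their data as window.initialData; the useful part
        # sits under its 'initialProps' key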
        initialData = self._search_regex(
            (r'window.initialData\s*=\s*({.+?})\s*;\s*\n', r'window.initialData\s*=\s*({.+?})\s*;'),
            webpage, 'initialData')
        props = self._parse_json(initialData, course_name, default={})
        props = try_get(props, lambda x: x['initialProps'], dict) or {}
        entries = []
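        # chapters are exposed as 'concepts'; only their video materials become
        # playlist entries, each resolved lazily through PlatziIE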
        for chapter_num, chapter in enumerate(props.get('concepts') or [], 1):
            if not isinstance(chapter, dict):
                continue
            materials = chapter.get('materials')
            if not materials or not isinstance(materials, list):
                continue
            chapter_title = chapter.get('title')
            chapter_id = str_or_none(chapter.get('id'))
            for material in materials:
                if not isinstance(material, dict):
                    continue
                if material.get('material_type') != 'video':
                    continue
                video_url = urljoin(url, material.get('url'))
                if not video_url:
                    continue
                entries.append({
                    '_type': 'url_transparent',
                    'url': video_url,
                    'title': str_or_none(material.get('name')),
                    'id': str_or_none(material.get('id')),
                    'ie_key': PlatziIE.ie_key(),
                    'chapter': chapter_title,
                    'chapter_number': chapter_num,
                    'chapter_id': chapter_id,
                })

        course_id = compat_str(try_get(props, lambda x: x['course']['id']))
        course_title = try_get(props, lambda x: x['course']['name'], compat_str)

        result = self.playlist_result(entries, course_id, course_title)
        desc = clean_html(get_element_by_class('RouteDescription-content', webpage))
        if desc:
            result['description'] = desc
        return result