1
0
mirror of https://github.com/ytdl-org/youtube-dl.git synced 2026-05-08 14:44:01 +00:00

Compare commits

..

1168 Commits

Author SHA1 Message Date
dirkf 11e3e97ba3 [gh-pages] Alias latest/version 2023-08-01 20:13:00 +01:00
Sergey M․ 21b7590575 Move myself to inactive 2021-12-17 02:21:18 +07:00
Sergey M․ f298ae8802 release 2021.12.17 2021-12-17 02:01:43 +07:00
Sergey M․ 6c1249d2b4 release 2021.06.06 2021-06-06 02:01:45 +07:00
Sergey M․ 72372c78f7 release 2021.05.16 2021-05-16 22:56:44 +07:00
Sergey M․ 5deee00d46 release 2021.04.26 2021-04-26 01:34:58 +07:00
Sergey M․ 979607edcb Update link to MSVC 2010 SP1 (x86) redist (closes #28845) 2021-04-24 17:00:27 +07:00
Sergey M․ 7a41debc6a release 2021.04.17 2021-04-17 03:51:40 +07:00
Sergey M․ d6e2088f21 release 2021.04.07 2021-04-07 03:43:51 +07:00
Sergey M․ 340f735935 release 2021.04.01 2021-04-01 04:48:38 +07:00
Sergey M․ 23734970a8 release 2021.03.31 2021-03-31 03:00:33 +07:00
Sergey M․ 80058d56b3 release 2021.03.25 2021-03-25 00:05:43 +07:00
Sergey M․ 1dba9b2a1b release 2021.03.14 2021-03-14 09:39:41 +07:00
Sergey M․ b3092ac022 release 2021.03.03 2021-03-03 11:49:14 +07:00
Sergey M․ 5fb6f1bc71 release 2021.03.02 2021-03-02 06:21:38 +07:00
Sergey M․ 7eab4b4a9a release 2021.02.22 2021-02-22 02:45:58 +07:00
Sergey M․ 0c41dc0741 release 2021.02.10 2021-02-10 22:36:20 +07:00
Sergey M․ 89b207849d release 2021.02.04.1 2021-02-04 13:13:06 +07:00
Sergey M․ cbf4dee6cb release 2021.02.04 2021-02-04 04:50:57 +07:00
Sergey M․ a66bbc90fc release 2021.01.24.1 2021-01-24 18:12:56 +07:00
Sergey M․ 6273b04b73 release 2021.01.16 2021-01-16 09:53:16 +07:00
Sergey M․ 628ec56431 release 2021.01.08 2021-01-08 11:06:09 +07:00
Sergey M․ 747e773e3e release 2021.01.03 2021-01-03 09:14:43 +07:00
Sergey M․ 78a7c60c89 release 2020.12.31 2020-12-31 05:19:41 +07:00
Sergey M․ 6d9bb94008 release 2020.12.29 2020-12-29 02:55:21 +07:00
Sergey M․ 132c97246e release 2020.12.26 2020-12-26 23:19:04 +07:00
Sergey M․ b8beaff3ea release 2020.12.22 2020-12-22 04:49:36 +07:00
Sergey M․ 2338db7a69 release 2020.12.14 2020-12-14 00:59:09 +07:00
Sergey M․ 8dee034ddb Move Yen to inactive core team members 2020-12-13 05:21:38 +07:00
Sergey M․ 7e7217b17a release 2020.12.12 2020-12-12 07:11:32 +07:00
Sergey M․ 78b3e4d2e7 release 2020.12.09 2020-12-09 04:27:53 +07:00
Sergey M․ 072adab987 release 2020.12.07 2020-12-07 02:05:04 +07:00
Sergey M․ fd5e5af703 release 2020.12.05 2020-12-05 03:41:30 +07:00
Sergey M․ 9ff667f26c release 2020.12.02 2020-12-02 01:39:09 +07:00
Sergey M․ f8361daa6b release 2020.11.29 2020-11-29 13:54:30 +07:00
Sergey M․ 6f495a5525 release 2020.11.26 2020-11-26 03:07:21 +07:00
Sergey M․ 89cef3ed0c release 2020.11.24 2020-11-24 00:24:56 +07:00
Sergey M․ c7f71a7102 release 2020.11.21.1 2020-11-22 00:00:56 +07:00
Sergey M․ 89bf39b675 release 2020.11.21 2020-11-21 23:57:32 +07:00
Sergey M․ 4be1df33b3 release 2020.11.19 2020-11-19 05:24:23 +07:00
Sergey M․ 574ba8b950 release 2020.11.18 2020-11-18 04:17:08 +07:00
Sergey M․ 853fd31b84 release 2020.11.17 2020-11-17 04:01:43 +07:00
Sergey M․ b2cdf7473d Add Philipp's GPG key last signed release date 2020-11-16 21:39:47 +07:00
Sergey M․ da56601790 Actualize GPG keys section 2020-11-16 21:34:57 +07:00
Sergey M․ aeb93a55d9 release 2020.11.12 2020-11-16 21:27:43 +07:00
Sergey M․ 13423a0949 release 2020.11.01.1 2020-11-16 21:27:43 +07:00
Sergey M․ 6493199602 release 2020.11.01 2020-11-16 21:27:43 +07:00
Chih-Hsuan Yen ed3623e794 Remove references for donations for legal concerns.
Closes: https://github.com/ytdl-org/youtube-dl/issues/15194
Closes: https://github.com/ytdl-org/youtube-dl/issues/2524
2020-09-26 00:21:42 +08:00
Philipp Hagemeister c5c0ca9a17 Mark myself as inactive
Due to legal reasons, I no longer can be affiliated with the youtube-dl project.
2020-09-25 00:33:48 +02:00
Sergey M․ 73aafe6587 release 2020.09.20 2020-09-20 12:32:23 +07:00
Sergey M․ e9294b3cd0 release 2020.09.14 2020-09-14 03:38:57 +07:00
Sergey M․ 3ca6e38c3e release 2020.09.06 2020-09-06 13:02:12 +07:00
Sergey M․ 70514be571 release 2020.07.28 2020-07-28 05:14:51 +07:00
Sergey M․ 1b2524162f release 2020.06.16.1 2020-06-16 06:23:50 +07:00
Sergey M․ 1b0ccbc9d3 release 2020.06.16 2020-06-16 02:55:30 +07:00
Sergey M․ ae70e648d7 release 2020.06.06 2020-06-06 01:53:37 +07:00
Sergey M․ eb06cd75f1 release 2020.05.29 2020-05-29 03:36:59 +07:00
Sergey M․ a1fb6467a7 release 2020.05.08 2020-05-08 18:12:50 +07:00
Sergey M․ ccddbf1d38 release 2020.05.03 2020-05-03 00:07:22 +07:00
Sergey M․ 1c3a449f5f release 2020.03.24 2020-03-24 03:16:14 +07:00
Sergey M․ 26725aae4e release 2020.03.08 2020-03-08 18:30:27 +07:00
Sergey M․ 861860d6d8 release 2020.03.06 2020-03-06 00:27:17 +07:00
Sergey M․ 051cd5a534 release 2020.03.01 2020-03-01 20:13:04 +07:00
Sergey M․ 74a4ae8fd5 release 2020.02.16 2020-02-16 22:45:24 +07:00
Sergey M․ 86b3bc6bb8 release 2020.01.24 2020-01-24 04:18:39 +07:00
Sergey M․ 9020751c99 release 2020.01.15 2020-01-15 01:39:46 +07:00
Sergey M․ 6424e27c16 release 2020.01.01 2020-01-01 05:26:26 +07:00
Sergey M․ b8e1172d6c release 2019.12.25 2019-12-25 01:18:37 +07:00
Sergey M․ 69a953d6b4 release 2019.11.28 2019-11-28 23:27:18 +07:00
Sergey M․ 1dd56efcbd release 2019.11.22 2019-11-22 01:26:08 +07:00
Sergey M․ a013159360 release 2019.11.05 2019-11-05 05:34:36 +07:00
Sergey M․ 4f9877e04a release 2019.10.29 2019-10-29 06:14:08 +07:00
Sergey M․ b8cd091fab release 2019.10.22 2019-10-22 00:10:44 +07:00
Sergey M․ 95eeda441b release 2019.10.16 2019-10-16 03:28:22 +07:00
Sergey M․ 5e66f5010e release 2019.09.28 2019-09-28 00:32:51 +07:00
Sergey M․ 456fb91a96 release 2019.09.12.1 2019-09-12 03:02:00 +07:00
Sergey M․ 0a323b22e1 release 2019.09.12 2019-09-12 01:53:14 +07:00
Sergey M․ f8234fe551 release 2019.09.01 2019-09-01 03:34:50 +07:00
Sergey M․ ad1040b7b4 Add links to GPG keys 2019-09-01 03:30:14 +07:00
Sergey M․ 534772c742 release 2019.08.13 2019-08-13 23:20:50 +07:00
Sergey M․ 847f6fe89f release 2019.08.02 2019-08-02 05:41:41 +07:00
Sergey M․ 6c421d7698 release 2019.07.30 2019-07-30 09:45:52 +07:00
Sergey M․ 198f635221 release 2019.07.27 2019-07-27 03:47:02 +07:00
Sergey M․ f5c004598b release 2019.07.16 2019-07-16 00:03:58 +07:00
Sergey M․ 3c2d79f7f0 release 2019.07.14 2019-07-14 03:16:51 +07:00
Sergey M․ 84d88ccaa2 release 2019.07.12 2019-07-12 00:45:57 +07:00
Sergey M․ 4cd1f2ac10 release 2019.07.02 2019-07-02 01:11:40 +07:00
Sergey M․ 315216bb83 release 2019.06.27 2019-06-27 23:59:22 +07:00
Sergey M․ a60d0a4a55 release 2019.06.21 2019-06-21 23:06:06 +07:00
ealgase 6090852410 Update link to GitHub issues page (#21409)
Have people pick an issue template instead of creating an empty issue
2019-06-18 01:24:09 +07:00
Sergey M․ d49c19762a release 2019.06.08 2019-06-08 03:49:28 +07:00
Sergey M․ d725e72738 release 2019.05.20 2019-05-20 23:31:52 +07:00
Sergey M․ e1c2436106 release 2019.05.11 2019-05-11 06:09:44 +07:00
Sergey M․ 8092594ed6 release 2019.04.30 2019-04-30 06:14:33 +07:00
Sergey M․ aab797009d release 2019.04.24 2019-04-24 10:07:57 +07:00
Sergey M․ 70d4271b2b release 2019.04.17 2019-04-17 01:04:32 +07:00
Sergey M․ e76bf566e7 release 2019.04.07 2019-04-07 04:21:57 +07:00
Sergey M․ 64c0b8c091 release 2019.04.01 2019-04-01 23:56:54 +07:00
Sergey M․ eab897a77b release 2019.03.18 2019-03-18 01:52:59 +07:00
Sergey M․ 04cd51a0d2 Start moving to ytdl-org 2019-03-11 04:02:00 +07:00
Sergey M․ 366611a433 release 2019.03.09 2019-03-09 02:55:40 +07:00
Sergey M․ 2cdb4cbda8 release 2019.03.01 2019-03-01 01:05:49 +07:00
Sergey M․ b7c4159cf4 release 2019.02.18 2019-02-18 02:13:07 +07:00
Sergey M․ 776ab5d197 release 2019.02.08 2019-02-08 01:14:34 +07:00
Sergey M․ c8c49ed9f2 release 2019.01.30.1 2019-01-30 06:21:19 +07:00
Sergey M․ 8f784279fa release 2019.01.30 2019-01-30 00:10:39 +07:00
Sergey M․ 9efd7102bf release 2019.01.27 2019-01-27 21:30:06 +07:00
Sergey M․ d8ed44d0c5 release 2019.01.24 2019-01-24 01:47:59 +07:00
Sergey M․ f8c035788c release 2019.01.23 2019-01-23 04:49:04 +07:00
Sergey M․ 9ffe8f3881 release 2019.01.17 2019-01-17 10:29:19 +07:00
Sergey M․ dbf36b0f0d release 2019.01.16 2019-01-16 02:25:56 +07:00
Sergey M․ 3d818b9608 release 2019.01.10 2019-01-10 23:33:51 +07:00
Sergey M․ 33479e1148 release 2019.01.02 2019-01-02 23:54:41 +07:00
Sergey M․ 80a8975990 release 2018.12.31 2019-01-01 00:02:02 +07:00
Sergey M․ 025607be72 release 2018.12.17 2018-12-17 05:39:47 +07:00
Sergey M․ 0780a7264f release 2018.12.09 2018-12-09 23:13:41 +07:00
Sergey M․ b58b254a97 release 2018.12.03 2018-12-03 00:12:07 +07:00
Sergey M․ bc966de080 release 2018.11.23 2018-11-23 00:18:34 +07:00
Sergey M․ cbee43ed19 release 2018.11.18 2018-11-18 00:13:43 +07:00
Sergey M․ 882e952aef release 2018.11.07 2018-11-07 01:40:08 +07:00
Sergey M․ 5f154c6cea release 2018.11.03 2018-11-03 02:59:23 +07:00
Sergey M․ eb6f7c190b release 2018.10.29 2018-10-29 00:41:13 +07:00
Sergey M․ c2a2a95eee release 2018.10.05 2018-10-05 02:33:34 +07:00
Sergey M․ 33c2f66cce release 2018.09.26 2018-09-26 12:00:35 +07:00
Sergey M․ 7c9892610d release 2018.09.18 2018-09-18 01:48:40 +07:00
Sergey M․ 14a0b8b228 release 2018.09.10 2018-09-10 02:50:48 +07:00
Sergey M․ d5c28fe83a release 2018.09.08 2018-09-08 03:44:54 +07:00
Sergey M․ c215f09b95 release 2018.09.01 2018-09-01 18:42:18 +07:00
Sergey M․ e95b4e63a0 release 2018.08.28 2018-08-28 03:13:04 +07:00
Sergey M․ 830431d0d8 release 2018.08.22 2018-08-22 02:34:36 +07:00
Sergey M․ e10516b65d release 2018.08.04 2018-08-04 01:25:44 +07:00
Sergey M․ 3651ec647c release 2018.07.29 2018-07-29 07:04:22 +07:00
Sergey M․ 31027fac78 release 2018.07.21 2018-07-21 21:02:27 +07:00
Sergey M․ dd13bb2901 release 2018.07.10 2018-07-10 02:11:28 +07:00
Sergey M․ 1ef6d64f84 release 2018.07.04 2018-07-04 05:01:26 +07:00
Sergey M․ ea0af571a2 release 2018.06.25 2018-06-25 02:28:19 +07:00
Sergey M․ b9f6f5400b release 2018.06.19 2018-06-19 23:18:09 +07:00
Sergey M․ a6871badf8 release 2018.06.18 2018-06-18 01:36:23 +07:00
Sergey M․ 9cb5e51de0 release 2018.06.14 2018-06-14 01:27:01 +07:00
Sergey M․ 76dbdca39f release 2018.06.11 2018-06-11 01:59:24 +07:00
Sergey M․ bef98ad2f4 release 2018.06.04 2018-06-04 02:43:51 +07:00
Sergey M․ 3a8ea8d17a Actualize copyrights 2018-06-04 02:39:30 +07:00
Sergey M․ efe19c0d37 release 2018.06.02 2018-06-02 01:53:07 +07:00
Sergey M․ 7db2ace54b release 2018.05.30 2018-05-30 21:56:13 +07:00
Sergey M․ 610d07eca7 release 2018.05.26 2018-05-26 13:04:23 +07:00
Sergey M․ cff7ad06d7 release 2018.05.18 2018-05-18 00:34:43 +07:00
Sergey M․ 493beb72af release 2018.05.09 2018-05-09 00:38:22 +07:00
Sergey M․ 0f2ff4f2e2 release 2018.05.01 2018-05-01 03:41:20 +07:00
Sergey M․ b9f748bdcb release 2018.04.25 2018-04-25 02:03:29 +07:00
Sergey M․ c94a4a7d3f release 2018.04.16 2018-04-16 01:11:13 +07:00
Sergey M․ 8e6efd6191 release 2018.04.09 2018-04-09 01:21:26 +07:00
Sergey M․ 99b8fd6cd3 release 2018.04.03 2018-04-03 00:28:34 +07:00
Sergey M․ f2411c698b release 2018.03.26.1 2018-03-26 22:33:42 +07:00
Sergey M․ 63e1223703 release 2018.03.26 2018-03-26 22:27:17 +07:00
Sergey M․ 2dd46e74a2 release 2018.03.20 2018-03-20 01:58:01 +07:00
Sergey M․ 2d59dbf140 release 2018.03.14 2018-03-14 22:51:48 +07:00
Sergey M․ 0ea14737af release 2018.03.10 2018-03-10 04:48:00 +07:00
Sergey M․ b8b7dbbaef release 2018.03.03 2018-03-03 01:38:48 +07:00
Sergey M․ 40c0ea163e release 2018.02.26 2018-02-26 04:25:23 +07:00
Sergey M․ 0e9e748a01 release 2018.02.25 2018-02-25 20:53:48 +07:00
Sergey M․ 540d4d0308 release 2018.02.22 2018-02-22 23:52:36 +07:00
Sergey M․ 679abbc99e release 2018.02.11 2018-02-11 22:33:31 +07:00
Sergey M․ c291983979 release 2018.02.08 2018-02-08 23:41:34 +07:00
Sergey M․ 42f75d4d45 release 2018.02.04 2018-02-04 08:04:48 +07:00
Sergey M․ 929045923f release 2018.02.03 2018-02-03 23:46:12 +07:00
Sergey M․ 3fc2784af8 release 2018.01.27 2018-01-27 23:46:12 +07:00
Sergey M․ 2c2dd943e0 release 2018.01.21 2018-01-21 21:27:47 +07:00
Sergey M․ e655f10673 release 2018.01.18 2018-01-18 23:43:45 +07:00
Sergey M․ 49aea39b26 release 2018.01.14 2018-01-14 00:15:39 +07:00
Sergey M․ 55a35ce9a1 release 2018.01.07 2018-01-07 23:54:06 +07:00
Sergey M․ 8457b78f9d release 2017.12.31 2017-12-31 04:32:33 +07:00
Sergey M․ 23e4d4fee3 release 2017.12.28 2017-12-28 23:14:59 +07:00
Sergey M․ 81153fdc24 release 2017.12.23 2017-12-23 21:26:27 +07:00
Sergey M․ f7644f0ffd release 2017.12.14 2017-12-14 05:21:18 +07:00
Sergey M․ 4a79deb63d release 2017.12.10 2017-12-10 23:20:52 +07:00
Sergey M․ 4e748007fc release 2017.12.02 2017-12-02 21:36:37 +07:00
Sergey M․ 0bc87bb732 release 2017.11.26 2017-11-26 21:51:15 +07:00
Sergey M․ 33db337845 release 2017.11.15 2017-11-15 00:21:39 +07:00
Sergey M․ 0f2f505c2f release 2017.11.06 2017-11-06 22:43:04 +07:00
Sergey M․ 71bff58a00 release 2017.10.29 2017-10-29 07:24:53 +07:00
Sergey M․ 2a11c83e32 release 2017.10.20 2017-10-20 23:42:43 +07:00
Sergey M․ f6a68a76d3 release 2017.10.15.1 2017-10-15 06:18:34 +07:00
Sergey M․ d451428eb0 release 2017.10.15 2017-10-15 02:28:59 +07:00
Sergey M․ 004938f463 release 2017.10.12 2017-10-12 01:08:25 +07:00
Sergey M․ d810a3fd73 release 2017.10.07 2017-10-07 05:04:53 +07:00
Sergey M․ 0b4bb07d96 release 2017.10.01 2017-10-01 21:56:35 +07:00
Sergey M․ 7033a5c718 release 2017.09.24 2017-09-24 00:24:55 +07:00
Sergey M․ 19d55d19f1 release 2017.09.15 2017-09-15 21:50:11 +07:00
Sergey M․ a17cf2f21e release 2017.09.11 2017-09-11 03:32:37 +07:00
Sergey M․ 0559e642f3 release 2017.09.10 2017-09-10 22:21:15 +07:00
Sergey M․ 879f4b5a4b release 2017.09.02 2017-09-02 01:10:22 +07:00
Sergey M․ f1c88434fe release 2017.08.27.1 2017-08-27 06:11:35 +07:00
Sergey M․ 329002a07d release 2017.08.27 2017-08-27 04:30:22 +07:00
Sergey M․ 0bd3d4f43b release 2017.08.23 2017-08-23 23:25:09 +07:00
Sergey M․ 0d842f0931 release 2017.08.18 2017-08-18 01:07:28 +07:00
Sergey M․ 7ffb188d0e release 2017.08.13 2017-08-13 09:00:21 +07:00
Sergey M․ 53653526df release 2017.08.09 2017-08-09 23:54:16 +07:00
Sergey M․ 3f7778cff5 release 2017.08.06 2017-08-06 09:07:41 +07:00
Sergey M․ f502243adb release 2017.07.30.1 2017-07-30 20:49:10 +07:00
Sergey M․ 551e77c1cd release 2017.07.23 2017-07-23 01:10:58 +07:00
Sergey M․ 234fc749eb release 2017.07.15 2017-07-15 07:38:20 +07:00
Sergey M․ 3979f9d91b Actualize about page 2017-07-15 07:25:53 +07:00
Sergey M․ 2baebebe4a release 2017.07.09 2017-07-09 20:18:41 +07:00
Sergey M․ d0cd4e653b release 2017.07.02 2017-07-02 20:18:56 +07:00
Sergey M․ 4dcb6bc04a release 2017.06.25 2017-06-25 05:17:55 +07:00
Sergey M․ 78844ace39 release 2017.06.23 2017-06-23 02:15:01 +07:00
Sergey M․ dcfd9d7a45 release 2017.06.18 2017-06-18 00:18:41 +07:00
Sergey M․ dccd806cb3 release 2017.06.12 2017-06-12 02:25:02 +07:00
Sergey M․ 4096a0d66f release 2017.06.05 2017-06-05 00:49:58 +07:00
Sergey M․ 76f8048076 release 2017.05.29 2017-05-29 00:43:56 +07:00
Sergey M․ 2742d23142 release 2017.05.26 2017-05-26 22:47:49 +07:00
Sergey M․ 5f170b1bda release 2017.05.23 2017-05-23 00:19:45 +07:00
Sergey M․ bf10513151 release 2017.05.18.1 2017-05-18 23:01:39 +07:00
Sergey M․ 3b2623455b release 2017.05.18 2017-05-18 22:36:12 +07:00
Sergey M․ 48fb22678c release 2017.05.14 2017-05-14 07:40:21 +07:00
Sergey M․ 28b168333b release 2017.05.09 2017-05-09 04:22:36 +07:00
Sergey M․ 253c51a16e release 2017.05.07 2017-05-07 04:53:26 +07:00
Sergey M․ 38de738253 release 2017.05.01 2017-05-01 01:42:55 +07:00
Sergey M․ 098929c2c6 release 2017.04.28 2017-04-28 18:31:43 +07:00
Sergey M․ b876a9cd72 release 2017.04.26 2017-04-26 00:08:50 +07:00
Sergey M․ 629dc1892f release 2017.04.17 2017-04-17 00:48:58 +07:00
Sergey M․ 4b8c87e5ae release 2017.04.16 2017-04-16 01:51:15 +07:00
Sergey M․ 83a5be5c78 release 2017.04.15 2017-04-15 01:41:40 +07:00
Sergey M․ 4e0652e5fc release 2017.04.14 2017-04-14 00:33:59 +07:00
Sergey M․ 54ebda6963 release 2017.04.11 2017-04-11 02:20:05 +07:00
Sergey M․ a9af16af9d release 2017.04.09 2017-04-09 00:03:53 +07:00
Sergey M․ 59f724af05 release 2017.04.03 2017-04-03 03:56:26 +07:00
Sergey M․ da53aa217d release 2017.04.02 2017-04-02 02:40:50 +07:00
Sergey M․ 9b1a20166b release 2017.03.26 2017-03-26 08:13:15 +07:00
Sergey M․ fb6696ca3a release 2017.03.24 2017-03-24 00:26:02 +07:00
Sergey M․ 112ed9c7ab release 2017.03.22 2017-03-22 02:49:04 +07:00
Sergey M․ 14d7ce7a21 release 2017.03.20 2017-03-20 00:09:38 +07:00
Sergey M․ 1cabc6acc3 release 2017.03.16 2017-03-16 22:44:31 +07:00
Sergey M․ e327cee999 release 2017.03.15 2017-03-15 02:04:31 +07:00
Sergey M․ 366ab6fbd4 release 2017.03.10 2017-03-10 23:29:18 +07:00
Sergey M․ 69d33a207a release 2017.03.07 2017-03-07 04:00:57 +07:00
Sergey M․ af016730ce release 2017.03.06 2017-03-06 04:06:16 +07:00
Sergey M․ 92541af62c release 2017.03.05 2017-03-05 02:25:16 +07:00
Sergey M․ b2066bc974 release 2017.03.02 2017-03-03 00:21:47 +07:00
Sergey M․ dd6c468984 release 2017.02.28 2017-03-01 00:14:41 +07:00
Sergey M․ 0202d38902 release 2017.02.27 2017-02-27 23:30:02 +07:00
Sergey M․ 83b62c9084 release 2017.02.24.1 2017-02-24 22:01:27 +07:00
Sergey M․ a428893aba release 2017.02.24 2017-02-24 06:11:14 +07:00
Sergey M․ ed83a1aefb release 2017.02.22 2017-02-22 23:52:35 +07:00
Sergey M․ e9a4b18b92 release 2017.02.21 2017-02-21 23:50:31 +07:00
Sergey M․ f4b623972c release 2017.02.17 2017-02-18 00:02:14 +07:00
Sergey M․ da22904fb0 release 2017.02.16 2017-02-16 00:11:50 +07:00
Sergey M․ 15751f4b67 release 2017.02.14 2017-02-14 01:11:58 +07:00
Sergey M․ 797a77034c release 2017.02.11 2017-02-11 03:25:44 +07:00
Sergey M․ f339ed8700 release 2017.02.10 2017-02-10 01:29:30 +07:00
Sergey M․ b3b47f32a8 release 2017.02.07 2017-02-07 02:06:54 +07:00
Sergey M․ e48688b313 release 2017.02.04.1 2017-02-04 23:28:16 +07:00
Sergey M․ 0696cdc164 release 2017.02.04 2017-02-04 23:00:00 +07:00
Sergey M․ 4f0a956f84 release 2017.02.01 2017-02-01 03:22:17 +07:00
Sergey M․ 932b2a23f1 release 2017.01.31 2017-01-31 22:26:42 +07:00
Sergey M․ efcd0e8d13 release 2017.01.29 2017-01-29 13:23:27 +07:00
Sergey M․ 0f766d3382 release 2017.01.28 2017-01-28 00:36:01 +07:00
Sergey M․ 9f85dd5863 release 2017.01.25 2017-01-25 23:37:51 +07:00
Sergey M․ c445b0e36c release 2017.01.24 2017-01-24 03:00:59 +07:00
Sergey M․ 92fb683c67 release 2017.01.22 2017-01-22 19:01:16 +07:00
Sergey M․ d549f3d89e release 2017.01.18 2017-01-18 23:11:56 +07:00
Sergey M․ 51feebce2a release 2017.01.16 2017-01-16 23:46:07 +07:00
Sergey M․ 38438fdf67 release 2017.01.14 2017-01-14 07:33:02 +07:00
Sergey M․ 8999c1e5ca release 2017.01.10 2017-01-10 21:31:15 +07:00
Sergey M․ 4b53472d1a release 2017.01.08 2017-01-08 21:00:21 +07:00
Sergey M․ 9655675e93 release 2017.01.05 2017-01-05 04:12:15 +07:00
Sergey M․ 56c3abce9a release 2017.01.02 2017-01-02 23:57:04 +07:00
Sergey M․ b7cf13240d release 2016.12.31 2017-01-01 00:00:44 +07:00
Sergey M․ 8454ae5c66 release 2016.12.22 2016-12-22 22:54:48 +07:00
Sergey M․ 89155cba2f release 2016.12.20 2016-12-20 22:41:33 +07:00
Sergey M․ d6210cc567 release 2016.12.18 2016-12-18 19:52:37 +07:00
Sergey M․ 64368043c2 release 2016.12.15 2016-12-15 21:18:40 +07:00
Sergey M․ 9afe56446e release 2016.12.12 2016-12-12 01:46:48 +07:00
Sergey M․ e58db47e88 release 2016.12.09 2016-12-09 02:48:10 +07:00
Sergey M․ b3fce8f4b0 release 2016.12.01 2016-12-01 00:16:55 +07:00
Sergey M․ bbc77dc77b release 2016.11.27 2016-11-27 20:07:45 +07:00
Sergey M․ 5dd9c9bd8e release 2016.11.22 2016-11-22 22:34:14 +07:00
Sergey M․ 872e22ef7a release 2016.11.18 2016-11-18 00:27:06 +07:00
Sergey M․ b70620141d release 2016.11.14.1 2016-11-14 02:49:51 +07:00
Sergey M․ 5abd12d6db release 2016.11.08.1 2016-11-08 22:32:44 +07:00
Sergey M․ 0d64f775fd release 2016.11.08 2016-11-08 22:13:07 +07:00
Sergey M․ 498a616957 release 2016.11.04 2016-11-04 22:10:57 +07:00
Sergey M․ df811195b3 release 2016.11.02 2016-11-02 02:41:14 +07:00
Sergey M․ 8a4416f196 release 2016.10.31 2016-10-31 01:38:45 +07:00
Sergey M․ 078a5ae930 release 2016.10.26 2016-10-26 19:57:26 +07:00
Sergey M․ 28bfe57d98 release 2016.10.25 2016-10-25 03:21:27 +07:00
Sergey M․ f35f2c3054 release 2016.10.21.1 2016-10-21 05:01:59 +07:00
Sergey M․ d979231c90 release 2016.10.21 2016-10-21 03:14:17 +07:00
Sergey M․ b408c2cee9 release 2016.10.19 2016-10-19 23:58:20 +07:00
Sergey M․ 2a230e0d14 release 2016.10.16 2016-10-16 03:26:09 +07:00
Sergey M․ 23aa1b6d8a release 2016.10.12 2016-10-12 21:32:32 +07:00
Sergey M․ a054e619cc release 2016.10.07 2016-10-07 22:27:15 +07:00
Sergey M․ a2a5e76628 release 2016.10.02 2016-10-02 15:59:38 +07:00
Sergey M․ b7538715d6 release 2016.09.27 2016-09-27 23:13:50 +07:00
Sergey M․ d3a2da730d release 2016.09.24 2016-09-24 02:18:35 +07:00
Sergey M․ 54d490ab70 release 2016.09.19 2016-09-19 03:00:23 +07:00
Sergey M․ c5647fb553 release 2016.09.18 2016-09-18 17:19:22 +07:00
Sergey M․ cd9433026d release 2016.09.15 2016-09-15 23:51:59 +07:00
Sergey M․ 09bb21c5c1 release 2016.09.11.1 2016-09-11 23:36:05 +07:00
Sergey M․ f50f62f3ba release 2016.09.11 2016-09-11 23:22:44 +07:00
Sergey M․ e0217ea0c7 release 2016.09.08 2016-09-08 23:49:54 +07:00
Sergey M․ 9515d34cdf Put donations info into separate paragraph 2016-09-08 23:44:37 +07:00
Philipp Hagemeister f8e043ccff Merge pull request #10590 from prayashm/patch-1
Add donations.html link
2016-09-08 14:55:53 +02:00
Prayash Mohapatra 55ae1a5152 Add donations.html link 2016-09-08 02:17:57 +05:30
Sergey M․ fff5d4c3da release 2016.09.04.1 2016-09-04 21:00:18 +07:00
Sergey M․ ae7c722607 release 2016.09.04 2016-09-04 20:54:02 +07:00
Sergey M․ 7bafd90591 release 2016.09.03 2016-09-03 01:49:22 +07:00
Sergey M․ 384c6af240 release 2016.08.31 2016-08-31 02:41:46 +07:00
Sergey M․ ca3d1c51dc release 2016.08.28 2016-08-28 07:25:37 +07:00
Sergey M․ f18c4ec060 release 2016.08.24.1 2016-08-24 10:13:08 +07:00
Sergey M․ 37f34abf6b release 2016.08.24 2016-08-24 01:41:06 +07:00
Philipp Hagemeister b65bb17fd9 highlight actual fingerprints
It has always been possible to generate keys with the same key ID (it's only 32 Bit, duh). There has now been a public demonstration of just that.
Do what we should have done ages ago by listing the full fingerprint only.
2016-08-22 01:26:41 +02:00
Sergey M․ 33f2f9f683 release 2016.08.22 2016-08-22 04:19:28 +07:00
Sergey M․ 9f87328418 release 2016.08.19 2016-08-19 00:17:23 +07:00
Sergey M․ b46638472a release 2016.08.17 2016-08-17 06:23:32 +07:00
Sergey M․ ddc28e92b9 release 2016.08.13 2016-08-13 23:19:51 +07:00
Sergey M․ 082742d195 release 2016.08.12 2016-08-12 00:24:30 +07:00
Philipp Hagemeister 310ec3b601 download.html.in: Remove outdated warning
No current webbrowser I am aware of has this behavior.
2016-08-09 20:03:20 +02:00
Sergey M․ dc27b9ef78 release 2016.08.10 2016-08-10 00:23:47 +07:00
Sergey M․ 99e1e39110 release 2016.08.07 2016-08-07 21:15:26 +07:00
Sergey M․ 5139edb775 release 2016.08.06 2016-08-06 01:25:42 +07:00
Sergey M․ 74cf4844c8 release 2016.08.01 2016-08-01 23:01:50 +07:00
Sergey M․ 22625eefbc release 2016.07.30 2016-07-30 14:47:34 +07:00
Sergey M․ 9b52314abf release 2016.07.28 2016-07-28 02:44:58 +07:00
Sergey M․ 3f6ea8989c release 2016.07.26.2 2016-07-26 23:59:39 +07:00
Sergey M․ ecd7dd9d08 release 2016.07.24 2016-07-24 11:41:33 +07:00
Sergey M․ c7bd93ab2f Actualize about page 2016-07-24 11:37:23 +07:00
Sergey M․ 10e07d485f release 2016.07.22 2016-07-22 23:13:47 +07:00
Sergey M․ 9d6f60a694 release 2016.07.17 2016-07-17 19:07:37 +07:00
Sergey M․ 3dbb634bf3 Use latest in download URLs 2016-07-17 18:56:36 +07:00
Sergey M․ 3533ba6d46 release 2016.07.16 2016-07-16 02:21:57 +07:00
Sergey M․ 252faa3af8 release 2016.07.13 2016-07-14 00:00:39 +07:00
Philipp Hagemeister 06da7aaca8 download.html: correct design 2016-07-12 00:15:59 +02:00
Philipp Hagemeister 55c68753df download.html: only use SHA256 sum
MD5 and to a lesser degree SHA1 are no longer useful to detect malicious modfication, so don't mention them.
Clarify that the numbers after the exe are hashsums.
Closes #10066
2016-07-12 00:15:17 +02:00
Sergey M․ 11e59ef04e release 2016.07.11 2016-07-11 03:19:00 +07:00
Sergey M․ b6b3380673 release 2016.07.09.2 2016-07-09 22:24:33 +07:00
Sergey M․ 9ebe17e80d release 2016.07.09.1 2016-07-09 10:08:07 +07:00
Sergey M․ 1559154b5a release 2016.07.09 2016-07-09 07:19:01 +07:00
Sergey M․ f84c714975 release 2016.07.07 2016-07-07 01:56:07 +07:00
Sergey M․ e1ae332b66 release 2016.07.06 2016-07-06 00:56:17 +07:00
Sergey M․ 51b27bbd59 release 2016.07.05 2016-07-05 06:35:15 +07:00
Sergey M․ c3eaebf2b3 release 2016.07.03.1 2016-07-03 21:31:18 +07:00
Sergey M․ 24ddfc3d4c release 2016.07.03 2016-07-03 03:21:10 +07:00
Philipp Hagemeister d894820391 Move @FiloSottile's key to historical 2016-07-01 22:12:56 +02:00
Sergey M․ 3ca7a40328 release 2016.07.02 2016-07-02 02:49:58 +07:00
Sergey M․ 7943fc726f release 2016.07.01 2016-07-01 04:01:45 +07:00
Sergey M․ e6c82a724d release 2016.06.30 2016-06-30 23:59:14 +07:00
Sergey M․ a029847fad release 2016.06.27 2016-06-27 23:12:15 +07:00
Sergey M․ 3aa66003a2 release 2016.06.26 2016-06-26 21:17:53 +07:00
Sergey M․ 0d208ed61d release 2016.06.25 2016-06-25 03:04:53 +07:00
Sergey M․ 7d32baaca9 release 2016.06.23.1 2016-06-23 09:44:24 +07:00
Sergey M․ 0b1a935eac release 2016.06.23 2016-06-23 04:31:07 +07:00
Sergey M․ ce64943931 release 2016.06.22 2016-06-22 23:46:10 +07:00
Sergey M․ 772283aa5f release 2016.06.20 2016-06-20 20:52:18 +07:00
Sergey M․ 5d5615f148 release 2016.06.19.1 2016-06-19 03:59:03 +07:00
Sergey M․ d47189bbcc release 2016.06.19 2016-06-19 02:33:32 +07:00
Sergey M․ 658975165e Add link to more sites 2016-06-18 22:12:24 +07:00
Sergey M․ d6d7600e37 Remove 'small' from description (Closes #9814) 2016-06-18 22:07:43 +07:00
Sergey M․ 2b60161e60 release 2016.06.18.1 2016-06-18 06:22:31 +07:00
Sergey M․ d7c814e4f7 release 2016.06.16 2016-06-16 22:52:30 +07:00
Sergey M․ 058c2daacc release 2016.06.14 2016-06-14 02:22:04 +07:00
Sergey M․ 145c9da564 release 2016.06.12 2016-06-12 12:08:33 +07:00
Sergey M․ 4cdb53c6ed release 2016.06.11.3 2016-06-11 08:35:40 +07:00
Sergey M․ 4c6805ceb2 release 2016.06.11.1 2016-06-11 03:02:13 +07:00
Sergey M․ dfb01371c8 release 2016.06.11 2016-06-11 02:44:49 +07:00
Sergey M․ af480e49ed Add -L to curl command line 2016-06-11 02:30:37 +07:00
Sergey M․ 8793251a03 release 2016.06.03 2016-06-03 23:27:48 +07:00
Sergey M․ 3d4970c6c0 release 2016.06.02 2016-06-02 01:21:51 +07:00
Sergey M․ 579ce0021e release 2016.05.30.2 2016-05-30 03:13:19 +07:00
Sergey M․ c063643e0f release 2016.05.30.1 2016-05-30 03:04:42 +07:00
Sergey M․ 1ab2908c7a release 2016.05.30 2016-05-30 01:19:32 +07:00
Sergey M․ d761e8a499 Clarify c runtime dependency 2016-05-30 00:54:48 +07:00
Sergey M․ 16a78ee8a8 Clarify Windows build 2016-05-29 12:42:10 +06:00
Sergey M․ a4084fc999 Add myself to GPG keys list 2016-05-29 12:25:17 +06:00
Philipp Hagemeister 27425e1e20 release 2016.05.21.2 2016-05-21 21:50:48 +02:00
Philipp Hagemeister 9f24d385e1 release 2016.05.16 2016-05-16 17:28:38 +02:00
Philipp Hagemeister 2b2f5d9156 release 2016.05.10 2016-05-10 09:14:08 +02:00
Philipp Hagemeister 4b15902432 release 2016.05.01 2016-05-01 10:30:29 +02:00
Philipp Hagemeister ef9ade0b77 release 2016.04.24 2016-04-24 17:09:44 +02:00
Philipp Hagemeister c11b84913a release 2016.04.19 2016-04-19 03:10:06 +02:00
Philipp Hagemeister 054156ac27 release 2016.04.13 2016-04-13 08:05:08 +02:00
Philipp Hagemeister 4388049201 release 2016.04.06 2016-04-06 15:16:29 +02:00
Philipp Hagemeister 737468e540 release 2016.04.05 2016-04-05 18:39:14 +02:00
Philipp Hagemeister e8c339da56 release 2016.04.01 2016-04-01 09:13:47 +02:00
Philipp Hagemeister 4555a7c83a release 2016.03.27 2016-03-27 17:00:27 +02:00
Philipp Hagemeister 9a71eb490e release 2016.03.26 2016-03-26 09:01:03 +01:00
Philipp Hagemeister 3d547b08c0 release 2016.03.25 2016-03-25 09:30:38 +01:00
Philipp Hagemeister 25982d3f66 release 2016.03.18 2016-03-18 11:46:46 +01:00
Philipp Hagemeister 74afb16e77 release 2016.03.14 2016-03-14 10:29:54 +01:00
Philipp Hagemeister 63e3ee4264 release 2016.03.06 2016-03-06 10:13:35 +01:00
Philipp Hagemeister fe10c160cc release 2016.03.01 2016-03-01 00:11:09 +01:00
Philipp Hagemeister c04b8cca69 release 2016.02.27 2016-02-27 21:41:57 +01:00
Philipp Hagemeister a3c2bdf818 release 2016.02.22 2016-02-22 12:01:27 +01:00
Philipp Hagemeister 8f8a03e36a release 2016.02.13 2016-02-13 08:30:15 +01:00
Philipp Hagemeister c1f884ce73 release 2016.02.10 2016-02-10 16:21:10 +01:00
Philipp Hagemeister 13437d26fa release 2016.02.09.1 2016-02-09 20:23:54 +01:00
Philipp Hagemeister 25fb0a671f release 2016.02.09 2016-02-09 13:01:38 +01:00
Philipp Hagemeister 7befbd830a release 2016.02.05.1 2016-02-05 15:19:14 +01:00
Philipp Hagemeister 56a6bfac2f release 2016.02.05 2016-02-05 11:07:33 +01:00
Philipp Hagemeister 4c13aed2ac release 2016.02.04 2016-02-04 13:40:48 +01:00
Philipp Hagemeister 854d1627da release 2016.02.01 2016-02-01 12:15:32 +01:00
Philipp Hagemeister f960ec7fad release 2016.01.31 2016-01-31 13:03:08 +01:00
Philipp Hagemeister d8a6bb81c0 release 2016.01.29 2016-01-29 12:24:08 +01:00
Philipp Hagemeister 168eb9c372 release 2016.01.27 2016-01-27 09:05:56 +01:00
Philipp Hagemeister 67ad5ea0d7 release 2016.01.23 2016-01-23 12:05:43 +01:00
Philipp Hagemeister 757043ab70 release 2016.01.15 2016-01-15 19:46:44 +01:00
Philipp Hagemeister 196c06f959 release 2016.01.14 2016-01-14 15:47:26 +01:00
Philipp Hagemeister fe8cbe9623 release 2016.01.09 2016-01-09 01:19:28 +01:00
Philipp Hagemeister 46f6a0dcdb release 2016.01.01 2016-01-01 12:21:17 +01:00
Philipp Hagemeister 1b3da21bcd release 2015.12.31 2015-12-31 16:57:33 +01:00
Philipp Hagemeister 7c765c45c9 release 2015.12.29 2015-12-29 11:05:50 +01:00
Philipp Hagemeister 217f29f44d release 2015.12.23 2015-12-23 14:08:19 +01:00
Philipp Hagemeister 411ef9fd0c release 2015.12.21 2015-12-21 11:46:41 +01:00
Philipp Hagemeister c096b6a83c release 2015.12.18 2015-12-18 14:25:26 +01:00
Philipp Hagemeister f5a5e29d2c release 2015.12.13 2015-12-13 10:56:18 +01:00
Philipp Hagemeister 6b71876402 release 2015.12.10 2015-12-10 17:07:19 +01:00
Philipp Hagemeister aaf061ce5e release 2015.12.09 2015-12-09 15:45:26 +01:00
Philipp Hagemeister 71e67c54d3 release 2015.12.06 2015-12-06 18:52:11 +01:00
Philipp Hagemeister 136d318592 release 2015.12.05 2015-12-05 16:09:27 +01:00
Philipp Hagemeister 111d1bdaeb release 2015.11.27.1 2015-11-27 16:43:48 +01:00
Philipp Hagemeister 07b956eeab release 2015.11.24 2015-11-24 07:49:46 +01:00
Philipp Hagemeister 7d94a5677f release 2015.11.23 2015-11-23 18:11:03 +01:00
Philipp Hagemeister 8633e60931 release 2015.11.21 2015-11-21 23:38:43 +01:00
Philipp Hagemeister da83d86935 release 2015.11.19 2015-11-19 15:40:08 +01:00
Philipp Hagemeister 157ddaf5c4 release 2015.11.18 2015-11-18 19:25:23 +01:00
Philipp Hagemeister 71dab9aee6 release 2015.11.15 2015-11-15 22:18:27 +01:00
Philipp Hagemeister ade87212d2 release 2015.11.13 2015-11-13 11:08:24 +01:00
Philipp Hagemeister abf90fe835 release 2015.11.10 2015-11-10 11:44:39 +01:00
Philipp Hagemeister a50ad865c5 release 2015.11.02 2015-11-02 16:24:38 +01:00
Philipp Hagemeister 5a00a85c62 release 2015.11.01 2015-11-01 14:22:57 +01:00
Philipp Hagemeister a158a7168e release 2015.10.24 2015-10-24 00:13:34 +02:00
Philipp Hagemeister ac132aee81 release 2015.10.23 2015-10-23 09:49:49 +02:00
Philipp Hagemeister 0791453d64 release 2015.10.18 2015-10-18 19:48:24 +02:00
Philipp Hagemeister 3dd2d8588b release 2015.10.16 2015-10-16 21:44:35 +02:00
Philipp Hagemeister 4b16f7be5b release 2015.10.13 2015-10-13 01:10:54 +02:00
Philipp Hagemeister e4c56e74a6 release 2015.10.12 2015-10-12 06:56:32 +02:00
Philipp Hagemeister 592d526e3e release 2015.10.09 2015-10-09 09:12:26 +02:00
Philipp Hagemeister f7d5b32507 release 2015.10.06.2 2015-10-06 23:50:03 +02:00
Philipp Hagemeister cde9823276 release 2015.10.06.1 2015-10-06 17:48:41 +02:00
Philipp Hagemeister c7ecb30755 release 2015.10.06 2015-10-06 09:16:37 +02:00
Philipp Hagemeister 7b7a810726 release 2015.09.28 2015-09-28 04:51:58 +02:00
Philipp Hagemeister bf98b012da release 2015.09.22 2015-09-22 22:53:10 +02:00
Philipp Hagemeister fa420d115d release 2015.09.09 2015-09-09 21:28:28 +02:00
Philipp Hagemeister 9caabdca83 release 2015.09.03 2015-09-03 12:35:51 +02:00
Philipp Hagemeister 4027267350 release 2015.08.28 2015-08-28 05:07:35 +02:00
Philipp Hagemeister 40cba3ae9a release 2015.08.23 2015-08-24 00:08:11 +02:00
Philipp Hagemeister 4dcb412869 release 2015.08.16.1 2015-08-16 23:52:09 +02:00
Philipp Hagemeister 9d55e73482 release 2015.08.16 2015-08-16 01:08:18 +02:00
Philipp Hagemeister a4f3acef97 release 2015.08.09 2015-08-09 20:18:19 +02:00
Philipp Hagemeister e74d2bdbc5 release 2015.08.06.1 2015-08-06 23:10:27 +02:00
Philipp Hagemeister d8196bece3 release 2015.07.28 2015-07-28 11:36:28 +02:00
Philipp Hagemeister 416f66a0b8 release 2015.07.21 2015-07-21 17:45:38 +02:00
Philipp Hagemeister a5d13c04d9 release 2015.07.18 2015-07-18 12:07:28 +02:00
Philipp Hagemeister b1a027002f release 2015.07.07 2015-07-07 10:45:03 +02:00
Philipp Hagemeister 87fa98037b release 2015.07.04 2015-07-04 09:38:09 +02:00
Philipp Hagemeister 3bb38bbbc5 release 2015.06.25 2015-06-25 07:46:57 +02:00
Philipp Hagemeister 9004185837 release 2015.06.15 2015-06-15 01:42:07 +02:00
Philipp Hagemeister adb380a355 Merge branch 'gh-pages' of github.com:rg3/youtube-dl into gh-pages 2015-06-04 21:59:19 +02:00
Philipp Hagemeister f1d3e95e46 release 2015.06.04.1 2015-06-04 21:56:50 +02:00
Philipp Hagemeister 9a75356870 release 2015.05.29 2015-05-29 08:14:46 +02:00
Sergey M. 1fc67b04d6 Clarify chmod command access permissions 2015-05-24 18:40:19 +06:00
Philipp Hagemeister 6685d1371f release 2015.05.20 2015-05-20 10:12:55 +02:00
Philipp Hagemeister 7ab18af783 release 2015.05.15 2015-05-15 10:30:06 +02:00
Philipp Hagemeister 36525e582b release 2015.05.10 2015-05-10 01:17:36 +02:00
Philipp Hagemeister 996790ebf8 release 2015.05.04 2015-05-04 15:16:16 +02:00
Philipp Hagemeister e75db9e70d release 2015.05.03 2015-05-03 22:44:47 +02:00
Philipp Hagemeister 70564bedb1 release 2015.04.28 2015-04-28 09:27:42 +02:00
Philipp Hagemeister 9717bdcc3d release 2015.04.26 2015-04-26 22:55:13 +02:00
Philipp Hagemeister 885c2259ec release 2015.04.17 2015-04-17 11:42:06 +02:00
Philipp Hagemeister 0882affb2e release 2015.04.09 2015-04-09 00:38:51 +02:00
Philipp Hagemeister d4317e2562 release 2015.04.03 2015-04-03 10:31:42 +02:00
Philipp Hagemeister 444be07f45 release 2015.03.28 2015-03-28 08:37:41 +01:00
Philipp Hagemeister a0ffcdb14c release 2015.03.24 2015-03-24 16:43:07 +01:00
Philipp Hagemeister a3c4b14e99 release 2015.03.18 2015-03-18 22:12:14 +01:00
Philipp Hagemeister 2548810ae1 release 2015.03.15 2015-03-15 19:42:28 +01:00
Philipp Hagemeister edf21705b7 release 2015.03.09 2015-03-09 03:08:48 +01:00
Philipp Hagemeister cac4d9ca02 release 2015.03.03.1 2015-03-03 14:04:17 +01:00
Philipp Hagemeister 801be27bf7 release 2015.03.03 2015-03-03 00:08:50 +01:00
Philipp Hagemeister c32a9ec29d release 2015.02.28 2015-02-28 21:28:20 +01:00
Philipp Hagemeister 6bcb3bb7a5 release 2015.02.26.2 2015-02-26 09:49:14 +01:00
Philipp Hagemeister 9dd5db7193 release 2015.02.26.1 2015-02-26 01:49:37 +01:00
Philipp Hagemeister 0b2374ea7e release 2015.02.26 2015-02-26 00:45:36 +01:00
Philipp Hagemeister 87ef91e888 release 2015.02.24.2 2015-02-24 16:37:49 +01:00
Philipp Hagemeister 81cb7c3f82 release 2015.02.24.1 2015-02-24 11:41:55 +01:00
Philipp Hagemeister b7fd00d68e release 2015.02.24 2015-02-24 11:32:25 +01:00
Philipp Hagemeister 01c432efcf release 2015.02.23.1 2015-02-23 18:56:21 +01:00
Philipp Hagemeister 540800e022 release 2015.02.23 2015-02-23 16:49:08 +01:00
Philipp Hagemeister b7c2f29c71 release 2015.02.21 2015-02-21 21:33:35 +01:00
Philipp Hagemeister 5f1e535937 release 2015.02.20 2015-02-20 23:29:08 +01:00
Philipp Hagemeister 31316b769c release 2015.02.19.3 2015-02-19 19:32:51 +01:00
Philipp Hagemeister 34779a9f20 release 2015.02.19.2 2015-02-19 01:47:14 +01:00
Philipp Hagemeister 4665572a61 release 2015.02.19.1 2015-02-19 01:08:30 +01:00
Philipp Hagemeister 25547d4e68 release 2015.02.19 2015-02-19 00:37:13 +01:00
Philipp Hagemeister 6d2840b147 release 2015.02.18.1 2015-02-18 11:03:11 +01:00
Philipp Hagemeister 6ea3d86cfe release 2015.02.18 2015-02-18 00:53:44 +01:00
Philipp Hagemeister 114d17529f release 2015.02.17.2 2015-02-17 17:40:26 +01:00
Philipp Hagemeister 597232528d release 2015.02.17 2015-02-17 17:31:00 +01:00
Philipp Hagemeister 4d1b121aa6 release 2015.02.16.1 2015-02-16 15:49:30 +01:00
Philipp Hagemeister 327906b652 release 2015.02.16 2015-02-16 04:55:10 +01:00
Philipp Hagemeister 776a127f38 release 2015.02.11 2015-02-11 19:07:08 +01:00
Philipp Hagemeister 9f0acce278 release 2015.02.10.5 2015-02-10 16:03:50 +01:00
Philipp Hagemeister 08f4c0f0e0 release 2015.02.10.4 2015-02-10 11:32:20 +01:00
Philipp Hagemeister 0103a02725 release 2015.02.10.3 2015-02-10 05:56:38 +01:00
Philipp Hagemeister 79379d02f3 release 2015.02.10.2 2015-02-10 03:36:45 +01:00
Philipp Hagemeister 2d2bd2a8cb release 2015.02.10.1 2015-02-10 01:56:52 +01:00
Philipp Hagemeister bb11b2fb3e release 2015.02.10 2015-02-10 01:24:01 +01:00
Philipp Hagemeister 071bec0e36 release 2015.02.09.3 2015-02-09 16:02:56 +01:00
Philipp Hagemeister 7fbd4db982 release 2015.02.09.2 2015-02-09 14:51:07 +01:00
Philipp Hagemeister 4e5165686d release 2015.02.09.1 2015-02-09 10:54:59 +01:00
Philipp Hagemeister 5525db3f74 release 2015.02.09 2015-02-09 10:33:02 +01:00
Philipp Hagemeister a5d09c457d release 2015.02.06 2015-02-06 14:49:03 +01:00
Philipp Hagemeister 3e88ea52e6 release 2015.02.04 2015-02-04 16:11:52 +01:00
Philipp Hagemeister 40fc4799f7 release 2015.02.03.1 2015-02-03 11:07:13 +01:00
Philipp Hagemeister cff0f7ded9 release 2015.02.03 2015-02-03 00:28:30 +01:00
Philipp Hagemeister 80dc055bf1 release 2015.02.02.5 2015-02-02 23:50:19 +01:00
Philipp Hagemeister e4e7579d0f release 2015.02.02.4 2015-02-02 23:41:52 +01:00
Philipp Hagemeister 683d41296c release 2015.02.02.2 2015-02-02 22:14:05 +01:00
Philipp Hagemeister 6169a90659 release 2015.02.02 2015-02-02 01:56:28 +01:00
Philipp Hagemeister 00ea5a500d release 2015.01.30.2 2015-01-30 04:49:09 +01:00
Philipp Hagemeister eb0846f1f0 release 2015.01.30.1 2015-01-30 04:02:43 +01:00
Philipp Hagemeister 2b11530bdb Replace documentation.html with README.md
This allows us to have all the current documentation in one place.
Closes #4817.
2015-01-30 03:01:19 +01:00
Philipp Hagemeister 2f6ff3c625 release 2015.01.25 2015-01-25 21:56:31 +01:00
Philipp Hagemeister b1b1bb29eb release 2015.01.23.4 2015-01-23 18:59:52 +01:00
Philipp Hagemeister b13efe451f release 2015.01.23.3 2015-01-23 12:21:28 +01:00
Philipp Hagemeister 0eb22525e7 release 2015.01.23.2 2015-01-23 11:24:42 +01:00
Philipp Hagemeister 1a2d700c8a release 2015.01.23.1 2015-01-23 00:35:58 +01:00
Philipp Hagemeister c7eab954b7 release 2015.01.23 2015-01-23 00:10:58 +01:00
Philipp Hagemeister 0fc90fb0ff release 2015.01.22 2015-01-22 13:11:14 +01:00
Philipp Hagemeister fdf7547735 release 2015.01.16 2015-01-16 14:26:02 +01:00
Philipp Hagemeister 03105f6854 release 2015.01.15.1 2015-01-15 22:46:27 +01:00
Philipp Hagemeister 41a49da1af release 2015.01.15 2015-01-15 12:50:30 +01:00
Philipp Hagemeister 3a863e1fdb release 2015.01.11 2015-01-11 17:52:27 +01:00
Philipp Hagemeister ef90868590 release 2015.01.10.2 2015-01-10 21:05:47 +01:00
Philipp Hagemeister 998259644e release 2015.01.10.1 2015-01-10 20:08:51 +01:00
Philipp Hagemeister febd9d6e84 release 2015.01.10 2015-01-10 05:54:30 +01:00
Philipp Hagemeister bb50007287 release 2015.01.09.2 2015-01-10 00:03:35 +01:00
Philipp Hagemeister fbcbaa08d2 release 2015.01.09.1 2015-01-09 21:35:36 +01:00
Philipp Hagemeister 79ae0d9359 release 2015.01.09 2015-01-09 20:24:35 +01:00
Philipp Hagemeister 73d6793ff4 release 2015.01.08 2015-01-08 16:21:40 +01:00
Philipp Hagemeister 2933146728 release 2015.01.07.2 2015-01-07 07:47:14 +01:00
Philipp Hagemeister 459c608b68 release 2015.01.07 2015-01-07 07:27:44 +01:00
Philipp Hagemeister 1b4bdeefe9 release 2015.01.05.1 2015-01-05 22:45:29 +01:00
Philipp Hagemeister 07f924bc45 release 2015.01.05 2015-01-05 18:56:02 +01:00
Philipp Hagemeister 0d17780e87 release 2015.01.04 2015-01-04 03:19:09 +01:00
Philipp Hagemeister d128f35310 Denote Python 3.2 support 2015-01-04 02:52:19 +01:00
Philipp Hagemeister 051e107eff Link to modern FAQ 2015-01-04 02:49:50 +01:00
Philipp Hagemeister 9c4ebf4970 Avoid mixed-content warnings 2015-01-04 02:46:28 +01:00
Philipp Hagemeister b59fb8d81b release 2015.01.03 2015-01-03 18:38:38 +01:00
Philipp Hagemeister d1f2600900 release 2015.01.02 2015-01-02 16:00:32 +01:00
Philipp Hagemeister e9260dbb31 release 2015.01.01 2015-01-01 21:51:17 +01:00
Philipp Hagemeister 868522e4c2 release 2014.12.17.2 2014-12-17 11:42:44 +01:00
Philipp Hagemeister 8149c54acf release 2014.12.17.1 2014-12-17 11:32:32 +01:00
Philipp Hagemeister 01ec54d121 release 2014.12.17 2014-12-17 10:57:14 +01:00
Philipp Hagemeister fa3d853f77 release 2014.12.16.2 2014-12-16 16:53:12 +01:00
Philipp Hagemeister 37d868f0f4 release 2014.12.16.1 2014-12-16 16:08:07 +01:00
Philipp Hagemeister 678d7bff51 release 2014.12.16 2014-12-16 00:33:55 +01:00
Philipp Hagemeister 7b86336325 release 2014.12.15 2014-12-15 01:42:54 +01:00
Philipp Hagemeister b8e8032f4d release 2014.12.14 2014-12-14 00:22:00 +01:00
Philipp Hagemeister 31d0cf05e0 release 2014.12.13.1 2014-12-13 23:55:05 +01:00
Philipp Hagemeister e3a3ac788f release 2014.12.13 2014-12-13 23:17:33 +01:00
Philipp Hagemeister 9bb483f879 release 2014.12.12.7 2014-12-12 18:29:19 +01:00
Philipp Hagemeister a1fa9761da release 2014.12.12.5 2014-12-12 17:48:39 +01:00
Philipp Hagemeister b831f149de release 2014.12.12.4 2014-12-12 17:21:10 +01:00
Philipp Hagemeister 4deb5d6900 release 2014.12.12.3 2014-12-12 16:51:21 +01:00
Philipp Hagemeister c191693e04 release 2014.12.12.2 2014-12-12 15:59:45 +01:00
Philipp Hagemeister 6e834f71b5 release 2014.12.12.1 2014-12-12 03:39:03 +01:00
Philipp Hagemeister 709a4eaca6 release 2014.12.10.3 2014-12-10 15:22:15 +01:00
Philipp Hagemeister 54526d3ff3 release 2014.12.10.2 2014-12-10 14:42:05 +01:00
Philipp Hagemeister 4f929b4877 release 2014.12.10.1 2014-12-10 13:27:56 +01:00
Philipp Hagemeister 2309c33451 release 2014.12.10 2014-12-10 12:26:40 +01:00
Philipp Hagemeister 7be418c432 release 2014.12.06.1 2014-12-06 00:52:04 +01:00
Philipp Hagemeister ac7b35363e release 2014.12.06 2014-12-06 00:47:31 +01:00
Philipp Hagemeister ff3bc15613 release 2014.12.04.2 2014-12-04 17:54:49 +01:00
Philipp Hagemeister eb5bc48718 release 2014.12.04.1 2014-12-04 17:04:49 +01:00
Philipp Hagemeister f57eec1bcb release 2014.12.04 2014-12-04 08:41:10 +01:00
Philipp Hagemeister 45162344b7 release 2014.12.03 2014-12-03 12:19:51 +01:00
Philipp Hagemeister e7250b59e1 release 2014.12.01 2014-12-01 17:33:02 +01:00
Philipp Hagemeister e7453bd649 release 2014.11.27 2014-11-27 16:07:17 +01:00
Philipp Hagemeister 2515aa0ab0 release 2014.11.26.4 2014-11-26 23:02:39 +01:00
Philipp Hagemeister 3e3977a554 release 2014.11.26.3 2014-11-26 22:18:22 +01:00
Philipp Hagemeister 82e4cbf19a release 2014.11.26.1 2014-11-26 22:05:41 +01:00
Philipp Hagemeister c64a67ccfd release 2014.11.26 2014-11-26 10:50:48 +01:00
Philipp Hagemeister 8c8b0e4ffd release 2014.11.25.1 2014-11-25 14:38:09 +01:00
Philipp Hagemeister 8238e31efb release 2014.11.25 2014-11-25 10:03:04 +01:00
Philipp Hagemeister 85ea5698b8 Merge branch 'gh-pages' of github.com:rg3/youtube-dl into gh-pages 2014-11-24 23:54:08 +01:00
Philipp Hagemeister 3742c04ee0 release 2014.11.24 2014-11-24 23:52:36 +01:00
Jaime Marquínez Ferrándiz e1a90e58d1 Merge pull request #4299 from crdx/patch-1
fix typo
2014-11-24 21:41:31 +01:00
Sean S 5e2e5b8a22 fix small typo 2014-11-24 20:06:26 +00:00
Philipp Hagemeister 9ce1f77204 release 2014.11.23.1 2014-11-23 10:57:16 +01:00
Philipp Hagemeister 34edc2b0b8 release 2014.11.23 2014-11-23 10:03:00 +01:00
Philipp Hagemeister d1bbe1e28b release 2014.11.21.1 2014-11-21 23:23:55 +01:00
Philipp Hagemeister 4bdc0e241a release 2014.11.21 2014-11-21 10:49:58 +01:00
Philipp Hagemeister ff764dd711 release 2014.11.20 2014-11-20 08:59:58 +01:00
Philipp Hagemeister 9077c895d9 release 2014.11.16 2014-11-16 00:54:37 +01:00
Philipp Hagemeister d3fd131e01 release 2014.11.15.1 2014-11-15 15:24:58 +01:00
Philipp Hagemeister 593a53026d release 2014.11.15 2014-11-15 11:15:23 +01:00
Philipp Hagemeister 890f6ffcdb release 2014.11.14 2014-11-14 22:39:23 +01:00
Philipp Hagemeister 0e5a26df53 release 2014.11.13.3 2014-11-13 16:31:57 +01:00
Philipp Hagemeister 7c10780356 release 2014.11.13.2 2014-11-13 16:25:36 +01:00
Philipp Hagemeister ed3c5f675b release 2014.11.13.1 2014-11-13 15:50:04 +01:00
Philipp Hagemeister d9089f9e69 release 2014.11.13 2014-11-13 10:03:13 +01:00
Philipp Hagemeister 50215c5106 release 2014.11.12.1 2014-11-12 11:47:33 +01:00
Philipp Hagemeister af596e7e2c release 2014.11.12 2014-11-12 09:17:46 +01:00
Philipp Hagemeister 901ac74b81 release 2014.11.09 2014-11-09 22:37:21 +01:00
Philipp Hagemeister 424f72d298 release 2014.11.04 2014-11-04 23:47:01 +01:00
Philipp Hagemeister a67cbf349c release 2014.11.02.1 2014-11-02 10:34:28 +01:00
Philipp Hagemeister c75740b5da release 2014.10.30 2014-10-30 10:18:45 +01:00
Philipp Hagemeister 1b7b087361 release 2014.10.29 2014-10-29 23:36:12 +01:00
Philipp Hagemeister b2ac1e6bd7 release 2014.10.27 2014-10-27 02:46:47 +01:00
Philipp Hagemeister 08011414c9 release 2014.10.26.2 2014-10-26 21:52:22 +01:00
Philipp Hagemeister e86358349a release 2014.10.26.1 2014-10-26 21:07:45 +01:00
Philipp Hagemeister da3fad69c5 release 2014.10.26 2014-10-26 17:37:09 +01:00
Philipp Hagemeister 9f783b25d1 release 2014.10.25 2014-10-25 00:47:51 +02:00
Philipp Hagemeister 31d51aa70f release 2014.10.24 2014-10-24 14:57:44 +02:00
Philipp Hagemeister 23c6d465ad release 2014.10.23 2014-10-23 20:24:40 +02:00
Philipp Hagemeister 20af98619c release 2014.10.18 2014-10-18 20:25:57 +02:00
Philipp Hagemeister e906eeae09 release 2014.10.15 2014-10-15 12:42:46 +02:00
Philipp Hagemeister 5ceedab0e8 release 2014.10.13 2014-10-13 10:20:12 +02:00
Philipp Hagemeister 7f19047270 release 2014.10.12 2014-10-12 22:31:09 +02:00
Philipp Hagemeister b9c4c1b640 release 2014.10.05.2 2014-10-05 22:19:14 +02:00
Philipp Hagemeister d55df49929 release 2014.10.05 2014-10-05 08:00:01 +02:00
Philipp Hagemeister 507e12d727 release 2014.10.02 2014-10-02 15:38:46 +02:00
Philipp Hagemeister 7b445bb0a0 Document config file location on Windows (#1881) 2014-09-29 21:58:41 +02:00
Philipp Hagemeister 57d91e2c31 release 2014.09.29.2 2014-09-29 04:53:29 +02:00
Philipp Hagemeister f16919ebad release 2014.09.29.1 2014-09-29 02:11:19 +02:00
Philipp Hagemeister 29a75387d7 release 2014.09.28.1 2014-09-28 12:19:10 +02:00
Philipp Hagemeister 757d2fff32 release 2014.09.28 2014-09-28 09:52:47 +02:00
Philipp Hagemeister c07ddd87a1 release 2014.09.25 2014-09-25 02:02:48 +02:00
Philipp Hagemeister 3579a6dbe0 release 2014.09.24.1 2014-09-24 14:19:05 +02:00
Philipp Hagemeister babe46018b release 2014.09.24 2014-09-24 11:16:45 +02:00
Philipp Hagemeister 72166059b3 release 2014.09.22 2014-09-22 13:11:04 +02:00
Philipp Hagemeister 9ce623c24d release 2014.09.19 2014-09-19 10:02:47 +02:00
Philipp Hagemeister 890a1cf917 release 2014.09.18 2014-09-18 18:49:04 +02:00
Philipp Hagemeister 08f60a6db5 release 2014.09.16.1 2014-09-16 23:38:36 +02:00
Philipp Hagemeister 78a7220c20 release 2014.09.16 2014-09-16 10:14:37 +02:00
Philipp Hagemeister b22048e3fc release 2014.09.15.1 2014-09-15 15:31:35 +02:00
Philipp Hagemeister c837fc59b8 release 2014.09.15 2014-09-15 15:16:34 +02:00
Philipp Hagemeister 97c9abd4b0 release 2014.09.14.3 2014-09-14 16:56:12 +02:00
Philipp Hagemeister 1b4bf0f3d1 release 2014.09.12 2014-09-12 08:01:28 +02:00
Philipp Hagemeister 914ff382d0 release 2014.09.10.1 2014-09-10 16:43:10 +02:00
Philipp Hagemeister 84794766aa release 2014.09.10 2014-09-10 12:42:39 +02:00
Philipp Hagemeister df24469c79 release 2014.09.06 2014-09-06 15:40:10 +02:00
Philipp Hagemeister f3a2b50b09 release 2014.09.04.3 2014-09-04 16:38:14 +02:00
Philipp Hagemeister e1f869e5e4 release 2014.09.04.1 2014-09-04 04:55:09 +02:00
Philipp Hagemeister 48edbf72f4 release 2014.09.04 2014-09-04 01:35:38 +02:00
Philipp Hagemeister 96217a7344 release 2014.09.01.2 2014-09-02 00:12:43 +02:00
Philipp Hagemeister d1603ead12 release 2014.09.01.1 2014-09-01 00:30:50 +02:00
Philipp Hagemeister 3236a3c603 release 2014.09.01 2014-09-01 00:10:27 +02:00
Philipp Hagemeister 34be9175a2 release 2014.08.29 2014-08-29 01:10:21 +02:00
Philipp Hagemeister cfe7713497 release 2014.08.28.2 2014-08-28 18:09:04 +02:00
Philipp Hagemeister b1f4e7a747 release 2014.08.28.1 2014-08-28 14:07:25 +02:00
Philipp Hagemeister 132b1532c6 release 2014.08.28 2014-08-28 01:44:00 +02:00
Philipp Hagemeister 12ca402bd7 release 2014.08.27.1 2014-08-27 02:40:40 +02:00
Philipp Hagemeister 65d1813840 release 2014.08.27 2014-08-27 01:47:30 +02:00
Philipp Hagemeister 667ef5ede4 release 2014.08.26 2014-08-26 21:34:32 +02:00
Philipp Hagemeister 62f1cbb88d release 2014.08.25.3 2014-08-25 18:42:51 +02:00
Philipp Hagemeister cf84511178 release 2014.08.25.2 2014-08-25 16:54:37 +02:00
Philipp Hagemeister 67de8f0589 release 2014.08.25 2014-08-25 09:42:26 +02:00
Philipp Hagemeister 778bfd76a6 release 2014.08.24.6 2014-08-24 15:24:02 +02:00
Philipp Hagemeister 365843a199 release 2014.08.24.5 2014-08-24 07:01:38 +02:00
Philipp Hagemeister 0326f6eda5 release 2014.08.24.4 2014-08-24 06:48:32 +02:00
Philipp Hagemeister b095a9bc3a release 2014.08.24.3 2014-08-24 05:37:59 +02:00
Philipp Hagemeister 64bdc95e11 release 2014.08.24.2 2014-08-24 04:55:13 +02:00
Philipp Hagemeister 2ed065729f release 2014.08.24.1 2014-08-24 03:30:56 +02:00
Philipp Hagemeister 60a010f665 release 2014.08.24 2014-08-24 02:49:20 +02:00
Philipp Hagemeister 803d3bd32d release 2014.08.23 2014-08-23 15:28:52 +02:00
Philipp Hagemeister deff9e0884 release 2014.08.22.3 2014-08-22 18:45:37 +02:00
Philipp Hagemeister 0345c8d777 release 2014.08.22.2 2014-08-22 03:19:39 +02:00
Philipp Hagemeister b3ebf44676 release 2014.08.22.1 2014-08-22 03:05:41 +02:00
Philipp Hagemeister df9f4d2baa release 2014.08.21.3 2014-08-21 18:07:19 +02:00
Philipp Hagemeister e073b8fa28 release 2014.08.21.2 2014-08-21 13:14:11 +02:00
Philipp Hagemeister d95287d2ae release 2014.08.21.1 2014-08-21 12:24:57 +02:00
Philipp Hagemeister b24b55c27b release 2014.08.10 2014-08-10 19:51:13 +02:00
Philipp Hagemeister 25beeb8dc4 release 2014.08.05 2014-08-05 17:24:39 +02:00
Philipp Hagemeister af50cac100 release 2014.08.02.1 2014-08-02 18:20:51 +02:00
Philipp Hagemeister 2709569cb2 release 2014.08.02 2014-08-02 12:33:22 +02:00
Philipp Hagemeister d5289eb78f release 2014.07.30 2014-07-30 09:57:28 +02:00
Philipp Hagemeister 289ff5644d release 2014.07.25.1 2014-07-25 10:52:25 +02:00
Philipp Hagemeister b466c5d28f release 2014.07.25 2014-07-25 07:08:57 +02:00
Philipp Hagemeister 423fba294a release 2014.07.24 2014-07-24 11:27:51 +02:00
Philipp Hagemeister d504be6d68 release 2014.07.23.2 2014-07-23 02:29:07 +02:00
Philipp Hagemeister 71bd78a193 release 2014.07.23.1 2014-07-23 01:33:40 +02:00
Philipp Hagemeister d0e59bf20d release 2014.07.23 2014-07-23 01:21:49 +02:00
Philipp Hagemeister ddc86a33dc release 2014.07.22 2014-07-22 17:04:25 +02:00
Philipp Hagemeister a513c01a24 release 2014.07.21 2014-07-21 18:14:01 +02:00
Philipp Hagemeister f6e1f2630d release 2014.07.20.2 2014-07-20 23:26:41 +02:00
Philipp Hagemeister c8855ec895 release 2014.07.20.1 2014-07-20 22:00:12 +02:00
Philipp Hagemeister 25bb51bca3 release 2014.07.20 2014-07-20 18:55:39 +02:00
Philipp Hagemeister 5eda0b4c08 release 2014.07.15 2014-07-15 23:06:12 +02:00
Philipp Hagemeister bd4a929996 fix an HTML error 2014-07-13 17:26:52 +02:00
Philipp Hagemeister db4ee49745 link list of supported sites on main page 2014-07-13 17:26:46 +02:00
Philipp Hagemeister 809804b1b0 Clarify that the youtube-dl exe works without Python (Fixes #3249) 2014-07-13 17:26:24 +02:00
Philipp Hagemeister 453ae702ee release 2014.07.11.3 2014-07-11 17:30:26 +02:00
Philipp Hagemeister 2fbb7eb190 release 2014.07.11.2 2014-07-11 13:38:40 +02:00
Philipp Hagemeister 0a0fc250dc release 2014.07.11.1 2014-07-11 11:58:15 +02:00
Philipp Hagemeister e299dbb8c9 release 2014.07.11 2014-07-11 10:53:48 +02:00
Philipp Hagemeister 1c6cd59a03 release 2014.07.10 2014-07-10 14:57:55 +02:00
Philipp Hagemeister b94fac1143 release 2014.06.26 2014-06-26 17:34:09 +02:00
Philipp Hagemeister 44caf6a435 release 2014.06.25 2014-06-25 21:36:29 +02:00
Philipp Hagemeister d62ea494d9 release 2014.06.24.1 2014-06-24 09:15:30 +02:00
Philipp Hagemeister 6581294d11 release 2014.06.19 2014-06-19 17:18:09 +02:00
Philipp Hagemeister 2cdbb0ea2e Merge branch 'gh-pages' of github.com:rg3/youtube-dl into gh-pages 2014-06-16 10:56:08 +02:00
Philipp Hagemeister 1d083917ef release 2014.06.16 2014-06-16 10:55:14 +02:00
Philipp Hagemeister 7b51ff76aa Merge pull request #3070 from EvanHahn/homebrew-instructions
Add Homebrew install instructions
2014-06-12 20:26:15 +02:00
Evan Hahn 2a1c4c2d37 Add Homebrew install instructions 2014-06-12 10:41:08 -07:00
Philipp Hagemeister c3cdb616f3 release 2014.06.09 2014-06-09 23:21:53 +02:00
Philipp Hagemeister 11aeec39d7 release 2014.06.07 2014-06-07 16:46:01 +02:00
Philipp Hagemeister 864a09fff0 release 2014.06.04 2014-06-04 06:52:39 +02:00
Philipp Hagemeister 8629db7f23 release 2014.06.02 2014-06-02 10:50:31 +02:00
Philipp Hagemeister 389588e362 release 2014.05.31.4 2014-05-31 20:48:00 +02:00
Philipp Hagemeister 20bb589883 release 2014.05.19 2014-05-19 11:57:22 +02:00
Philipp Hagemeister 081cc94583 release 2014.05.16.1 2014-05-16 15:57:29 +02:00
Philipp Hagemeister 3081d368bc release 2014.05.16 2014-05-16 12:20:34 +02:00
Philipp Hagemeister a67fe93cd6 release 2014.05.13 2014-05-13 10:25:12 +02:00
Philipp Hagemeister 9190a734e6 release 2014.05.12 2014-05-12 16:42:31 +02:00
Philipp Hagemeister ee5d951160 release 2014.05.05 2014-05-05 03:17:54 +02:00
Philipp Hagemeister 57f7d997aa release 2014.04.30.1 2014-04-30 10:14:35 +02:00
Philipp Hagemeister 524b6a2fd0 release 2014.04.30 2014-04-30 02:11:34 +02:00
Philipp Hagemeister 3228c709d4 release 2014.04.21.6 2014-04-21 16:20:52 +02:00
Philipp Hagemeister c78dff0119 release 2014.04.21.5 2014-04-21 16:00:19 +02:00
Philipp Hagemeister de13322e71 release 2014.04.21.4 2014-04-21 15:28:05 +02:00
Philipp Hagemeister 14af0080ea release 2014.04.21.3 2014-04-21 12:42:49 +02:00
Philipp Hagemeister 455f4dbd65 release 2014.04.21.2 2014-04-21 07:17:26 +02:00
Philipp Hagemeister db831975a2 release 2014.04.21.1 2014-04-21 06:40:19 +02:00
Philipp Hagemeister 0fb9d3e3f8 release 2014.04.21 2014-04-21 02:49:04 +02:00
Philipp Hagemeister 3612000f81 release 2014.04.19 2014-04-19 12:46:42 +02:00
Philipp Hagemeister 6773911609 release 2014.04.13 2014-04-13 03:27:36 +02:00
Philipp Hagemeister 783436dda9 release 2014.04.11.2 2014-04-11 09:49:02 +02:00
Philipp Hagemeister 685b357631 release 2014.04.11.1 2014-04-11 01:34:16 +02:00
Philipp Hagemeister 030c5d014a release 2014.04.07.4 2014-04-07 23:25:49 +02:00
Philipp Hagemeister 22a41b7d30 release 2014.04.07.3 2014-04-07 22:50:20 +02:00
Philipp Hagemeister 9667720e74 release 2014.04.07.2 2014-04-07 21:44:20 +02:00
Philipp Hagemeister 36152c3da7 release 2014.04.07.1 2014-04-07 15:46:26 +02:00
Philipp Hagemeister 52351c0372 release 2014.04.07 2014-04-07 13:49:05 +02:00
Philipp Hagemeister 9c0e70fd32 release 2014.04.04.7 2014-04-04 23:04:55 +02:00
Philipp Hagemeister e32838d54c release 2014.04.04.6 2014-04-04 22:51:49 +02:00
Philipp Hagemeister 2d2d9d1108 release 2014.04.04.5 2014-04-04 22:32:24 +02:00
Philipp Hagemeister 9c323f4926 release 2014.04.04.4 2014-04-04 22:21:38 +02:00
Philipp Hagemeister 2de3613cd7 release 2014.04.04.2 2014-04-04 02:38:36 +02:00
Philipp Hagemeister bbd7253743 release 2014.04.04.3 2014-04-04 02:15:41 +02:00
Philipp Hagemeister 5e6e2c1ac6 release 2014.04.04.1 2014-04-04 00:31:19 +02:00
Philipp Hagemeister f17a9ace3a release 2014.04.04 2014-04-04 00:10:34 +02:00
Philipp Hagemeister f7156ab56e release 2014.04.03.3 2014-04-03 16:25:04 +02:00
Philipp Hagemeister 693e0ae049 release 2014.04.03.2 2014-04-03 15:31:15 +02:00
Philipp Hagemeister 7b6e6131ad release 2014.04.03.1 2014-04-03 09:06:51 +02:00
Philipp Hagemeister 8e5e8656e0 release 2014.04.03 2014-04-03 06:15:28 +02:00
Philipp Hagemeister 38c43f6644 release 2014.04.02 2014-04-02 14:33:15 +02:00
Philipp Hagemeister e6c9e944ac release 2014.04.01.3 2014-04-01 13:20:58 +02:00
Philipp Hagemeister 7a22d25719 release 2014.04.01.2 2014-04-01 06:04:49 +02:00
Philipp Hagemeister 934e86ff0c release 2014.04.01.1 2014-04-01 00:27:55 +02:00
Philipp Hagemeister 68cba3e46a release 2014.04.01 2014-04-01 00:06:09 +02:00
Philipp Hagemeister 08d4cab829 release 2014.03.30.1 2014-03-30 16:04:42 +02:00
Philipp Hagemeister 2073acb594 release 2014.03.30 2014-03-30 07:28:27 +02:00
Philipp Hagemeister dcd600069e release 2014.03.29 2014-03-29 14:05:10 +01:00
Philipp Hagemeister 15d57b9673 release 2014.03.28 2014-03-28 23:18:09 +01:00
Philipp Hagemeister f79e8b7ab8 release 2014.03.27.1 2014-03-27 02:54:40 +01:00
Philipp Hagemeister c8b64cf1d6 release 2014.03.27 2014-03-27 02:26:02 +01:00
Philipp Hagemeister ea2dadfb5a release 2014.03.25.1 2014-03-25 14:30:40 +01:00
Philipp Hagemeister f3c9b0c286 release 2014.03.25 2014-03-25 04:06:07 +01:00
Philipp Hagemeister 65729455c7 release 2014.03.24.5 2014-03-24 23:26:34 +01:00
Philipp Hagemeister f7cc23f1e9 release 2014.03.24.4 2014-03-24 22:14:37 +01:00
Philipp Hagemeister 41e7dd23d1 release 2014.03.24.3 2014-03-24 17:15:13 +01:00
Philipp Hagemeister 9c7a1bbee9 release 2014.03.24.2 2014-03-24 15:03:47 +01:00
Philipp Hagemeister e76a774494 release 2014.03.24.1 2014-03-24 10:29:08 +01:00
Philipp Hagemeister e72711c9b0 release 2014.03.24.2 2014-03-24 02:28:53 +01:00
Philipp Hagemeister 9566442ef3 release 2014.03.24 2014-03-24 01:46:43 +01:00
Philipp Hagemeister 0e362aee7e release 2014.03.23 2014-03-23 16:09:31 +01:00
Philipp Hagemeister c65f61621c release 2014.03.21.5 2014-03-21 14:55:16 +01:00
Philipp Hagemeister 191c0f4e90 release 2014.03.21.3 2014-03-21 02:12:45 +01:00
Philipp Hagemeister 8b64d68089 release 2014.03.21.2 2014-03-21 01:44:34 +01:00
Philipp Hagemeister bb5a8bca6a release 2014.03.21.1 2014-03-21 01:22:30 +01:00
Philipp Hagemeister 2a403b5d4f release 2014.03.21 2014-03-21 00:44:35 +01:00
Philipp Hagemeister f6cdbe792e release 2014.03.20 2014-03-20 16:38:42 +01:00
Philipp Hagemeister 98328f6693 release 2014.03.18.1 2014-03-18 14:49:12 +01:00
Philipp Hagemeister bab31e5c21 release 2014.03.17 2014-03-17 14:52:02 +01:00
Philipp Hagemeister 02bc59198f release 2014.03.12 2014-03-12 14:54:35 +01:00
Philipp Hagemeister 0a27c5551b release 2014.03.11 2014-03-11 16:53:42 +01:00
Philipp Hagemeister 86da332b5b release 2014.03.10 2014-03-10 13:07:07 +01:00
Philipp Hagemeister 62d8317a73 release 2014.03.07.1 2014-03-07 16:08:43 +01:00
Philipp Hagemeister 41366765df release 2014.03.07 2014-03-07 06:47:07 +01:00
Philipp Hagemeister 6d9061d3e0 release 2014.03.06 2014-03-07 00:05:18 +01:00
Philipp Hagemeister 515403b421 release 2014.03.04.2 2014-03-04 21:02:20 +01:00
Philipp Hagemeister e5eb46a38a release 2014.03.04.1 2014-03-04 03:43:47 +01:00
Philipp Hagemeister 4948cca3fd release 2014.03.04 2014-03-04 03:36:27 +01:00
Philipp Hagemeister 47b5a0247f release 2014.03.03 2014-03-03 13:55:59 +01:00
Philipp Hagemeister bcb0eb70e0 release 2014.02.28 2014-02-28 14:57:09 +01:00
Philipp Hagemeister 3fb6d6a6ee release 2014.02.27.1 2014-02-27 16:14:05 +01:00
Philipp Hagemeister b7403001f3 release 2014.02.27 2014-02-27 07:28:28 +01:00
Philipp Hagemeister d5b8d93710 release 2014.02.26 2014-02-26 00:39:22 +01:00
Philipp Hagemeister 4b5e477e60 release 2014.02.25.1 2014-02-25 11:23:12 +01:00
Philipp Hagemeister c9875e8879 release 2014.02.25 2014-02-25 01:51:41 +01:00
Philipp Hagemeister d602cd1cb5 release 2014.02.24 2014-02-24 09:49:03 +01:00
Philipp Hagemeister de2966d1ea release 2014.02.22.1 2014-02-22 23:21:46 +01:00
Philipp Hagemeister 7eed3e3843 release 2014.02.22 2014-02-22 15:15:26 +01:00
Philipp Hagemeister dd00e42333 release 2014.02.21.1 2014-02-21 18:24:18 +01:00
Philipp Hagemeister 58b677fc73 release 2014.02.21 2014-02-21 12:25:46 +01:00
Philipp Hagemeister b32ae92a4e release 2014.02.20 2014-02-20 13:21:29 +01:00
Philipp Hagemeister 108d3b83e9 release 2014.02.19.1 2014-02-19 01:31:59 +01:00
Philipp Hagemeister 97dd4404b0 release 2014.02.19 2014-02-19 01:23:02 +01:00
Philipp Hagemeister 573bedb2fc release 2014.02.17 2014-02-17 11:43:32 +01:00
Philipp Hagemeister 263ce0864f release 2014.02.13 2014-02-13 19:22:40 +01:00
Philipp Hagemeister 6e4d131e23 release 2014.02.10 2014-02-10 02:06:00 +01:00
Philipp Hagemeister d79a7ae4b9 release 2014.02.08.2 2014-02-08 19:41:53 +01:00
Philipp Hagemeister 3b4af451a4 release 2014.02.08.1 2014-02-08 18:42:35 +01:00
Philipp Hagemeister 37d767ec49 release 2014.02.08 2014-02-08 16:34:54 +01:00
Philipp Hagemeister 59afad8ba4 release 2014.02.06.3 2014-02-07 01:48:36 +01:00
Philipp Hagemeister bdc68eb84a release 2014.02.06.2 2014-02-06 15:50:33 +01:00
Philipp Hagemeister e242fb16d4 release 2014.02.06.1 2014-02-06 11:34:29 +01:00
Philipp Hagemeister d24ff9356e release 2014.02.06 2014-02-06 03:32:58 +01:00
Philipp Hagemeister 9bd7125b93 release 2014.02.05 2014-02-05 21:44:31 +01:00
Philipp Hagemeister 420cefb071 release 2014.02.04.1 2014-02-04 23:38:10 +01:00
Philipp Hagemeister a486999504 release 2014.02.04 2014-02-04 16:36:11 +01:00
Philipp Hagemeister 77d02342d1 release 2014.02.03.1 2014-02-03 15:26:38 +01:00
Philipp Hagemeister f29214b135 release 2014.02.03 2014-02-03 06:59:02 +01:00
Philipp Hagemeister 809028e9fa release 2014.01.30.2 2014-01-30 19:36:46 +01:00
Philipp Hagemeister 1dd2c27354 release 2014.01.30.1 2014-01-30 05:57:09 +01:00
Philipp Hagemeister 7651ced5d1 release 2014.01.30 2014-01-30 05:02:45 +01:00
Philipp Hagemeister cd8b212f5b release 2014.01.29 2014-01-29 11:28:22 +01:00
Philipp Hagemeister c53f5c1332 release 2014.01.28.1 2014-01-28 03:48:33 +01:00
Philipp Hagemeister 30d89fba3a release 2014.01.27.2 2014-01-27 19:25:32 +01:00
Philipp Hagemeister 58efc4e3a8 release 2014.01.27.1 2014-01-27 07:12:51 +01:00
Philipp Hagemeister 2c714450ca release 2014.01.27 2014-01-27 07:09:22 +01:00
Philipp Hagemeister 9c07dd21e8 [donations] Linkify bitcoin 2014-01-27 03:06:21 +01:00
Philipp Hagemeister 27cf21a718 [donations] Mention BTC (#2238) 2014-01-27 03:04:46 +01:00
Philipp Hagemeister 590fe11a01 release 2014.01.23.4 2014-01-24 00:10:58 +01:00
Philipp Hagemeister 66e2e6495b release 2014.01.23.3 2014-01-24 00:00:40 +01:00
Philipp Hagemeister 2999c9fc2b release 2014.01.23.2 2014-01-23 19:15:46 +01:00
Philipp Hagemeister 7362738116 release 2014.01.23.1 2014-01-23 10:41:47 +01:00
Philipp Hagemeister 35c171b552 release 2014.01.23 2014-01-23 00:32:05 +01:00
Philipp Hagemeister 77941a2ec3 release 2014.01.22.5 2014-01-22 22:15:37 +01:00
Philipp Hagemeister 1e1e007962 release 2014.01.22.4 2014-01-22 21:16:54 +01:00
Philipp Hagemeister 3749d754de release 2014.01.22.3 2014-01-22 14:55:38 +01:00
Philipp Hagemeister c079f0aeea release 2014.01.22.2 2014-01-22 14:36:47 +01:00
Philipp Hagemeister d513c0e282 release 2014.01.22.1 2014-01-22 02:26:30 +01:00
Philipp Hagemeister 647c4ff6e2 release 2014.01.22 2014-01-22 00:24:23 +01:00
Philipp Hagemeister a41b958624 release 2014.01.21.1 2014-01-21 18:27:20 +01:00
Philipp Hagemeister 55baadab26 release 2014.01.21 2014-01-21 14:12:26 +01:00
Philipp Hagemeister 4d1261086a release 2014.01.20 2014-01-20 11:52:39 +01:00
Philipp Hagemeister 632cc94b53 release 2014.01.17.2 2014-01-17 04:27:58 +01:00
Philipp Hagemeister f147df731c release 2014.01.17.1 2014-01-17 03:13:08 +01:00
Philipp Hagemeister 32c72b8b45 release 2014.01.17 2014-01-17 02:17:28 +01:00
Philipp Hagemeister 516b8fc6d9 Update format documentation (Closes #2158) 2014-01-17 01:42:52 +01:00
Philipp Hagemeister aeeca4fa5c release 2014.01.08 2014-01-08 23:47:26 +01:00
Philipp Hagemeister 831eb2bee7 release 2014.01.07.5 2014-01-07 10:13:29 +01:00
Philipp Hagemeister fadea0241b release 2014.01.07.3 2014-01-07 08:34:43 +01:00
Philipp Hagemeister 604543e9a1 release 2014.01.07.1 2014-01-07 08:18:36 +01:00
Philipp Hagemeister 7cdf324189 release 2014.01.07 2014-01-07 05:37:53 +01:00
Philipp Hagemeister 1af2a670f8 release 2014.01.06.1 2014-01-06 19:29:39 +01:00
Philipp Hagemeister e4a93bdc2f release 2014.01.06 2014-01-06 17:50:12 +01:00
Philipp Hagemeister 216955eed2 release 2014.01.05.6 2014-01-05 12:13:00 +01:00
Philipp Hagemeister 7e8fcedf9b release 2014.01.05.5 2014-01-05 06:00:47 +01:00
Philipp Hagemeister dcc555a4eb release 2014.01.05.4 2014-01-05 05:45:45 +01:00
Philipp Hagemeister a7b4b8b819 release 2014.01.05.3 2014-01-05 05:34:43 +01:00
Philipp Hagemeister 912c10737a [Atom feed] Make all dates valid 2014-01-05 05:15:50 +01:00
Philipp Hagemeister 898535e6fe Improve Atom feed (#2081) 2014-01-05 05:03:33 +01:00
Philipp Hagemeister 0e4c82eea4 release 2014.01.05.1 2014-01-05 04:52:35 +01:00
Philipp Hagemeister 8b20599d6f release 2014.01.03 2014-01-03 12:18:39 +01:00
Philipp Hagemeister 97c7ea358c release 2013.12.26 2013-12-26 22:00:58 +01:00
Philipp Hagemeister 3b506e4652 release 2013.12.23.4 2013-12-23 05:14:39 +01:00
Philipp Hagemeister d952e4ca00 release 2013.12.23.3 2013-12-23 04:46:57 +01:00
Philipp Hagemeister c817e24689 release 2013.12.23.2 2013-12-23 04:33:44 +01:00
Philipp Hagemeister bfacf9503c release 2013.12.23.1 2013-12-23 04:22:43 +01:00
Philipp Hagemeister 95f310dd4e release 2013.12.23 2013-12-23 03:49:04 +01:00
Philipp Hagemeister 977584c07e release 2013.12.20 2013-12-20 17:11:32 +01:00
Philipp Hagemeister 4bfb57ea3e release 2013.12.17.2 2013-12-17 13:05:32 +01:00
Philipp Hagemeister 9eacff6377 release 2013.12.17.1 2013-12-17 04:16:10 +01:00
Philipp Hagemeister 566ec7305b release 2013.12.17 2013-12-17 02:54:19 +01:00
Philipp Hagemeister 9d69b0e1e8 release 2013.12.16.7 2013-12-16 22:23:23 +01:00
Philipp Hagemeister ef3cf84635 release 2013.12.16.6 2013-12-16 21:49:53 +01:00
Philipp Hagemeister 5c0d5fd588 release 2013.12.16.4 2013-12-16 21:14:07 +01:00
Philipp Hagemeister 21bff54f24 release 2013.12.16.3 2013-12-16 14:46:48 +01:00
Philipp Hagemeister a06fb40105 release 2013.12.16.2 2013-12-16 14:20:46 +01:00
Philipp Hagemeister 5d60fe65c8 release 2013.12.16.1 2013-12-16 06:14:46 +01:00
Philipp Hagemeister 3f60bdf21c release 2013.12.16 2013-12-16 04:48:44 +01:00
Philipp Hagemeister 3f0efb919a release 2013.12.11.2 2013-12-11 09:28:50 +01:00
Philipp Hagemeister 128fd87c1d release 2013.12.11.1 2013-12-11 08:59:01 +01:00
Philipp Hagemeister b2c4d5023c release 2013.12.11 2013-12-11 08:52:00 +01:00
Philipp Hagemeister 12c220af97 release 2013.12.10 2013-12-10 11:58:50 +01:00
Philipp Hagemeister d5bd62fa47 release 2013.12.09.4 2013-12-09 20:11:45 +01:00
Philipp Hagemeister a002dc993a release 2013.12.09.2 2013-12-09 18:37:18 +01:00
Philipp Hagemeister 150fc5cece release 2013.12.09.1 2013-12-09 04:12:20 +01:00
Philipp Hagemeister e0156de904 release 2013.12.09 2013-12-09 03:05:04 +01:00
Philipp Hagemeister 998e24adc9 release 2013.12.08.1 2013-12-08 07:35:56 +01:00
Philipp Hagemeister a0a0fa236e release 2013.12.08 2013-12-08 06:59:04 +01:00
Philipp Hagemeister bad94a7022 release 2013.12.04 2013-12-04 14:21:30 +01:00
Philipp Hagemeister 09f355f73b release 2013.12.03 2013-12-03 13:18:19 +01:00
Philipp Hagemeister a1ce6f914d release 2013.12.02 2013-12-02 15:01:38 +01:00
Philipp Hagemeister abf0865149 release 2013.11.29 2013-11-29 03:38:08 +01:00
Philipp Hagemeister ffa5af300e release 2013.11.28.1 2013-11-28 06:20:08 +01:00
Philipp Hagemeister 79806ab5a3 release 2013.11.28 2013-11-28 05:53:55 +01:00
Philipp Hagemeister 88584f74b5 release 2013.11.26 2013-11-26 10:48:14 +01:00
Philipp Hagemeister 7079c07db2 release 2013.11.25.3 2013-11-25 22:57:46 +01:00
Philipp Hagemeister bd2cf1dd4f release 2013.11.25.2 2013-11-25 15:49:04 +01:00
Philipp Hagemeister 035ee9c090 release 2013.11.25.1 2013-11-25 06:09:46 +01:00
Philipp Hagemeister 94a36fdcea release 2013.11.25 2013-11-25 03:45:35 +01:00
Philipp Hagemeister fcaafe5619 Remove duplicate list from documentation
Users are prone to overread the introductory text and conclude that just because an extractor is missing in this list, youtube-dl does not support the site.
2013-11-24 07:58:32 +01:00
Philipp Hagemeister 2097054c58 release 2013.11.24.1 2013-11-24 07:55:07 +01:00
Philipp Hagemeister aa6e508a39 release 2013.11.24 2013-11-24 07:33:47 +01:00
Philipp Hagemeister 1e4364dad6 release 2013.11.22.2 2013-11-22 23:28:10 +01:00
Philipp Hagemeister 0c0e6a64a3 release 2013.11.22.1 2013-11-22 20:25:49 +01:00
Philipp Hagemeister dde4488dd2 release 2013.11.22 2013-11-22 17:48:53 +01:00
Philipp Hagemeister 1fce91664e release 2013.11.21 2013-11-21 14:02:04 +01:00
Philipp Hagemeister fa79991c9a release 2013.11.20 2013-11-20 07:32:28 +01:00
Philipp Hagemeister aa0740099f release 2013.11.19 2013-11-19 12:51:45 +01:00
Philipp Hagemeister 907ea8357a release 2013.11.18.1 2013-11-18 14:01:34 +01:00
Philipp Hagemeister d71ce54c9d release 2013.11.18 2013-11-18 13:34:28 +01:00
Philipp Hagemeister 6da31d3f46 release 2013.11.17 2013-11-17 22:24:23 +01:00
Philipp Hagemeister 2488c5a094 release 2013.11.15.1 2013-11-15 14:37:46 +01:00
Philipp Hagemeister 0d2799f1ab release 2013.11.15 2013-11-15 02:05:19 +01:00
Philipp Hagemeister a429266549 release 2013.11.13 2013-11-13 11:12:44 +01:00
Philipp Hagemeister c4e7461ac4 release 2013.11.11 2013-11-11 18:39:21 +01:00
Philipp Hagemeister 757e64afc6 release 2013.11.07 2013-11-07 11:09:17 +01:00
Philipp Hagemeister 0a7456c6a7 release 2013.11.06.1 2013-11-06 22:15:41 +01:00
Philipp Hagemeister 149f358828 release 2013.11.06 2013-11-06 14:09:37 +01:00
Philipp Hagemeister aaa19f7d06 release 2013.11.03 2013-11-03 15:53:20 +01:00
Philipp Hagemeister 0daef4a0e4 release 2013.11.02 2013-11-02 11:23:54 +01:00
Philipp Hagemeister e2d49a0b93 release 2013.10.30 2013-10-30 01:18:58 +01:00
Philipp Hagemeister e3e599a555 release 2013.10.29 2013-10-29 06:50:46 +01:00
Philipp Hagemeister f41816778d release 2013.10.28 2013-10-28 11:39:32 +01:00
Philipp Hagemeister 009dc7c5a5 release 2013.10.23.2 2013-10-23 18:43:11 +02:00
Philipp Hagemeister 46fb9e95a0 release 2013.10.23.1 2013-10-23 15:13:57 +02:00
Philipp Hagemeister c7b6aa8db1 release 2013.10.23 2013-10-23 00:10:29 +02:00
Philipp Hagemeister 1b3b408cc4 release 2013.10.22 2013-10-22 23:01:36 +02:00
Philipp Hagemeister e5adce03ae release 2013.10.18.2 2013-10-18 23:25:05 +02:00
Philipp Hagemeister 4478038b58 release 2013.10.18.1 2013-10-18 11:47:32 +02:00
Philipp Hagemeister 275fe0b84a release 2013.10.18 2013-10-18 11:20:38 +02:00
Philipp Hagemeister e703403b01 release 2013.10.17 2013-10-17 02:23:59 +02:00
Philipp Hagemeister 3014883d87 release 2013.10.15 2013-10-15 12:29:40 +02:00
Philipp Hagemeister 16200ac861 release 2013.10.09 2013-10-09 23:53:17 +02:00
Philipp Hagemeister 96bb7f8050 release 2013.10.07 2013-10-07 14:36:58 +02:00
Philipp Hagemeister 61cc6dede0 release 2013.10.06 2013-10-06 07:23:25 +02:00
Philipp Hagemeister 37b3fc7b06 release 2013.10.04 2013-10-04 00:40:16 +02:00
Philipp Hagemeister f793d89f34 release 2013.10.01.1 2013-10-01 14:47:16 +02:00
Philipp Hagemeister df20183f34 release 2013.10.01 2013-10-01 11:46:54 +02:00
Philipp Hagemeister 6e1229457b release 2013.09.29 2013-09-29 14:43:38 +02:00
Philipp Hagemeister ecfaa4b127 release 2013.09.24.2 2013-09-24 21:58:15 +02:00
Philipp Hagemeister c750a62ac5 release 2013.09.24.1 2013-09-24 21:42:50 +02:00
Philipp Hagemeister 8186c26b07 release 2013.09.24 2013-09-24 21:36:22 +02:00
Philipp Hagemeister da6ae4d37a release 2013.09.20.1 2013-09-20 23:02:39 +02:00
Philipp Hagemeister f0f42bac08 release 2013.09.20 2013-09-20 10:29:48 +02:00
Philipp Hagemeister 4ae561e7af release 2013.09.17 2013-09-17 17:09:18 +02:00
Philipp Hagemeister 1535e44625 Update download.html as well 2013-09-16 04:33:41 +02:00
Philipp Hagemeister 1cd889a44e Advertise wget and curl (Closes #1147) 2013-09-16 04:21:00 +02:00
Philipp Hagemeister ee087a70ed release 2013.09.16 2013-09-16 04:14:52 +02:00
Philipp Hagemeister 853c2b9864 release 2013.11.09 2013-09-11 11:37:40 +02:00
Philipp Hagemeister 347cfe3a4b release 2013.09.10 2013-09-10 11:55:57 +02:00
Philipp Hagemeister ed8aa85130 release 2013.09.07 2013-09-08 00:29:39 +02:00
Philipp Hagemeister d08c332b8b release 2013.09.06.1 2013-09-06 11:08:32 +02:00
Philipp Hagemeister 2b9e50990e release 2013.09.06 2013-09-06 10:17:43 +02:00
Philipp Hagemeister 65be4a1276 release 2013.09.05 2013-09-05 22:55:17 +02:00
Philipp Hagemeister 8c0e9c62c9 release 2013.09.04 2013-09-04 14:50:37 +02:00
Philipp Hagemeister 9649cab51f Clarify donations 2013-09-02 13:41:42 +02:00
Philipp Hagemeister 6794888237 Add donations page (Fixes #1344) 2013-09-02 12:35:27 +02:00
Jaime Marquínez Ferrándiz 0f783c4790 Readd yahoo to the list of supported sites
It has been fixed for a while.
2013-08-31 15:26:16 +02:00
Jaime Marquínez Ferrándiz 23ed3711fc Added the template for the supported sites and the generated page.
Link to that page in documentation.html
2013-08-31 15:09:41 +02:00
Philipp Hagemeister dbd043409c release 2013.08.30 2013-08-30 21:23:11 +02:00
Philipp Hagemeister 3d34010890 release 2013.08.29 2013-08-30 00:06:37 +02:00
Philipp Hagemeister 1a1ebb10a2 release 2013.08.28.1 2013-08-28 19:29:10 +02:00
Philipp Hagemeister e9f671d34f release 2013.08.28 2013-08-27 23:42:12 +02:00
Philipp Hagemeister d4d6cce692 release 2013.08.27 2013-08-27 02:36:41 +02:00
Philipp Hagemeister cd73b9f9df release 2013.08.23 2013-08-23 23:42:40 +02:00
Philipp Hagemeister 721656267b release 2013.08.22 2013-08-22 23:33:47 +02:00
Philipp Hagemeister 0d3e82002d release 2013.08.17 2013-08-17 08:47:02 +02:00
Philipp Hagemeister f42ceac10d release 2013.08.15 2013-08-15 22:53:48 +02:00
Philipp Hagemeister 1415d02075 release 2013.08.14 2013-08-14 10:23:40 +02:00
Philipp Hagemeister bcdbda3abe release 2013.08.09 2013-08-09 15:52:30 +02:00
Philipp Hagemeister cde9507171 release 2013.08.08.1 2013-08-08 20:49:29 +02:00
Philipp Hagemeister 3d3d76e745 release 2013.08.08 2013-08-08 00:52:44 +02:00
Philipp Hagemeister a7d6fa532a release 2013.08.02 2013-08-02 13:37:57 +02:00
Philipp Hagemeister d8fb038cca release 2013.07.31 2013-07-31 10:58:43 +02:00
Philipp Hagemeister 42fe48f81d release 2013.07.25.2 2013-07-25 23:03:17 +02:00
Philipp Hagemeister cc86b1e78c release 2013.07.25.1 2013-07-25 10:11:29 +02:00
Philipp Hagemeister 45059e938c release 2013.07.25 2013-07-25 09:38:41 +02:00
Philipp Hagemeister 80cbf875da release 2013.07.24.2 2013-07-24 21:25:54 +02:00
Philipp Hagemeister b4fc4c0cae release 2013.07.23.1 2013-07-23 18:44:25 +02:00
Philipp Hagemeister 57adf7b86b release 2013.07.23 2013-07-23 15:55:34 +02:00
Philipp Hagemeister 18e616ccf2 release 2013.07.19 2013-07-19 23:49:46 +02:00
Philipp Hagemeister 86cc15eb06 release 2013.07.18 2013-07-18 12:49:56 +02:00
Philipp Hagemeister 8f61efc5d9 release 2013.07.17.1 2013-07-17 11:26:29 +02:00
Philipp Hagemeister 1a21103b9f release 2013.07.17 2013-07-17 01:27:10 +02:00
Philipp Hagemeister 7269f7f6f8 release 2013.07.12 2013-07-12 00:03:14 +02:00
Philipp Hagemeister 582d80020d release 2013.07.11 2013-07-11 22:04:57 +02:00
Philipp Hagemeister f8d6958341 release 2013.07.10 2013-07-10 11:41:37 +02:00
Philipp Hagemeister 1824d80813 release 2013.07.08.1 2013-07-08 02:09:19 +02:00
Philipp Hagemeister 604ea6fcc3 release 2013.07.08 2013-07-08 01:35:29 +02:00
Philipp Hagemeister f31532a09e release 2013.07.07.01 2013-07-07 17:18:02 +02:00
Philipp Hagemeister 36128970bc release 2013.07.05 2013-07-05 15:14:15 +02:00
Philipp Hagemeister 95f17ed689 release 2013.07.04 2013-07-04 18:11:49 +02:00
Philipp Hagemeister 5d23097037 release 2013.07.02 2013-07-02 09:23:11 +02:00
Philipp Hagemeister b4a0cb65d2 release 2013.06.34.4 2013-06-29 20:27:48 +02:00
Philipp Hagemeister f172f50972 release 2013.06.34.3 2013-06-29 17:36:18 +02:00
Philipp Hagemeister ddd7e3903d release 2013.06.34.2 2013-06-27 21:06:56 +02:00
Philipp Hagemeister c052f7300c release 2013.06.34.1 2013-06-27 18:01:54 +02:00
Philipp Hagemeister 375db3649b release 2013.06.34 2013-06-27 13:06:57 +02:00
Philipp Hagemeister 4bcac9eadc release 2013.06.33 2013-06-25 22:45:44 +02:00
Philipp Hagemeister f2392cc419 release 2013.06.32 2013-06-25 21:05:34 +02:00
Philipp Hagemeister 4bba8b41a0 release 2013.06.31 2013-06-25 18:43:38 +02:00
Philipp Hagemeister 1deaa4f150 release 2013.06.30 2013-06-25 12:31:16 +02:00
Philipp Hagemeister e6e5130471 release 2013.06.29 2013-06-24 14:59:54 +02:00
Philipp Hagemeister eb409ed6e1 release 2013.06.28 2013-06-24 12:46:24 +02:00
Philipp Hagemeister 79ce12a211 release 2013.06.27 2013-06-24 10:42:51 +02:00
Philipp Hagemeister 93dad3d78c release 2013.06.26 2013-06-24 01:09:03 +02:00
Philipp Hagemeister 064abef36f release 2013.06.25 2013-06-24 00:23:35 +02:00
Philipp Hagemeister 6356d0fecd Merge branch 'gh-pages' of github.com:rg3/youtube-dl into gh-pages 2013-06-23 23:55:13 +02:00
Philipp Hagemeister 5ac0aa0a56 merge 2013-06-23 23:54:49 +02:00
Philipp Hagemeister 96c4401076 release 2013.06.23 2013-06-23 23:53:53 +02:00
Philipp Hagemeister 97b8fca23b release 2013.06.23 2013-06-23 23:51:30 +02:00
Philipp Hagemeister 4d45501232 Merge pull request #891 from Lx/gh-pages-conf-format
describe permitted .conf file content
2013-06-23 09:42:34 -07:00
Philipp Hagemeister c523528639 document new PGP key 2013-06-21 23:05:33 +02:00
Philipp Hagemeister 0b9da457da release 2013.06.21 2013-06-21 00:41:48 +02:00
Alex Peters 5ea8a2b4ff describe permitted .conf file content
Determined by inspecting the documentation for the Python `shlex`
module:

http://docs.python.org/2/library/shlex.html
2013-06-17 21:55:15 +10:00
Philipp Hagemeister d2b808ce4b release 2013.05.23 2013-05-23 13:42:54 +02:00
Philipp Hagemeister dadcaecdfa release 2013.05.14 2013-05-13 14:55:23 +02:00
Philipp Hagemeister 46d641a918 release 2013.05.10 2013-05-10 02:00:07 +02:00
Philipp Hagemeister 1feb835874 release 2013.05.07 2013-05-05 21:25:13 +02:00
Philipp Hagemeister 4fd46f8015 release 2013.05.05 2013-05-04 12:34:53 +02:00
Philipp Hagemeister 8a42178a6a release 2013.05.04 2013-05-04 07:17:52 +02:00
Philipp Hagemeister bb224c7c78 release 2013.05.01 2013-05-01 14:11:35 +02:00
Philipp Hagemeister ff99565e9b release 2013.04.31 2013-04-30 19:56:24 +02:00
Philipp Hagemeister 532122da0f release 2013.04.30 2013-04-30 18:27:06 +02:00
Philipp Hagemeister abcefe99a1 release 2013.04.28 2013-04-28 16:35:35 +02:00
Philipp Hagemeister 4fd463e86c Disable notes 2013-04-28 16:22:55 +02:00
Philipp Hagemeister 81c8e577ae release 2013.04.27 2013-04-28 16:15:14 +02:00
Philipp Hagemeister bb6dfb9553 Add note about -t by default 2013-04-27 21:59:15 +02:00
Jaime Marquínez Ferrándiz 3bd90071be Document order of preference for format selection (related #798) 2013-04-23 10:29:39 +02:00
Philipp Hagemeister fa6bc20e23 release 2013.04.22 2013-04-22 20:37:04 +02:00
Philipp Hagemeister d8330e5a1d release 2013.04.21 2013-04-21 13:00:36 +02:00
Philipp Hagemeister d429f1174a release 2013.04.18 2013-04-18 06:42:52 +02:00
Filippo Valsorda 543b572f1f Merge pull request #788 from jaimeMF/pip
Add pip installation command
2013-04-17 10:57:50 -07:00
Jaime Marquínez Ferrándiz fda2d312d4 Add link to pypi and a note about the update 2013-04-16 14:47:08 +02:00
Jaime Marquínez Ferrándiz 4a715cbacb Add pip installation command
Closes #786
2013-04-16 14:24:05 +02:00
Philipp Hagemeister de1351917d Merge pull request #787 from jaimeMF/commands_style
Move the style of the commands in download.html to style.css
2013-04-16 05:15:27 -07:00
Jaime Marquínez Ferrándiz 8f273b2c05 Move the style of the commands in download.html to style.css 2013-04-16 14:06:16 +02:00
Philipp Hagemeister f5c68c0d86 release 2013.04.11 2013-04-11 18:49:43 +02:00
Filippo Valsorda 24a170f8fd new GPG key - this new one is signed by the old expired one - closes #779 2013-04-06 21:55:12 +02:00
Filippo Valsorda c5ba189be4 release 2013.04.03 2013-04-06 20:53:44 +02:00
Ricardo Garcia 66661fc019 Change rg3.github.com to rg3.github.io in atom feed 2013-04-06 10:43:34 +02:00
Ricardo Garcia a87558dc12 Documentation license change (fixes #768) 2013-04-03 20:09:38 +02:00
Philipp Hagemeister 8dfa0b76e6 minor feed improvements 2013-03-29 21:43:48 +01:00
Philipp Hagemeister 18aab9843e Correct feed URL 2013-03-29 21:37:51 +01:00
Philipp Hagemeister 7d8127b4b8 release 2013.03.29 2013-03-29 21:29:41 +01:00
Philipp Hagemeister 2386b04c24 Ignore egg-info 2013-03-29 21:19:05 +01:00
Philipp Hagemeister cd0b44fd5e Add link to RSS feed (#758) 2013-03-29 21:18:48 +01:00
Philipp Hagemeister ad8385f1fc release 2013.02.25 2013-02-25 00:34:21 +01:00
Philipp Hagemeister a908b7b816 release 2013.02.22 2013-02-24 00:55:16 +01:00
Philipp Hagemeister 4d8623ed73 release 2012.02.22 2013-02-22 16:46:14 +01:00
Philipp Hagemeister 30b8dbc74f Add note 2013-02-22 16:39:03 +01:00
Philipp Hagemeister e06c1be12d release 2013.02.19 2013-02-19 00:09:58 +01:00
Philipp Hagemeister 1726ff8b2e release 2013.02.18 2013-02-18 23:40:33 +01:00
Philipp Hagemeister 566a15f553 release 2013.02.02 2013-02-02 14:51:42 +01:00
Philipp Hagemeister 99c81cae13 release 2013.02.01 2013-02-01 18:00:15 +01:00
Philipp Hagemeister aa26b834bb release 2013.01.28 2013-01-27 19:10:13 +01:00
Philipp Hagemeister 2644989c7c Merge pull request #636 from jaimeMF/gh-pages
Fix the path to the configuration file
2013-01-13 10:23:50 -08:00
Jaime Marquínez Ferrándiz 158d35395f Fix the path to the configuration file 2013-01-13 19:10:44 +01:00
Philipp Hagemeister 7fb298e717 release 2013.01.13 2013-01-12 22:23:55 +01:00
Philipp Hagemeister 680adff87b Merge branch 'gh-pages' of /home/phihag/projects/youtube-dl/. into gh-pages
Conflicts:
	download.html
	update/LATEST_VERSION
	update/versions.json
2013-01-12 18:20:20 +01:00
Philipp Hagemeister efad35fb70 release 2013.01.12 2013-01-12 18:15:24 +01:00
Philipp Hagemeister 56e6a59bc9 release 2013.01.11 2013-01-12 18:06:07 +01:00
Philipp Hagemeister 7aa6cb5973 release 2013.01.11 2013-01-11 08:13:12 +01:00
Philipp Hagemeister 45af080919 release 2013.01.08 2013-01-08 10:33:37 +01:00
Philipp Hagemeister ffea5c2832 Delete test_coverage 2013-01-06 23:38:56 +01:00
Philipp Hagemeister 77b66b65bd release 2013.01.06 2013-01-06 23:27:36 +01:00
Philipp Hagemeister 3d710b6438 fix signature 2013-01-03 19:08:58 +01:00
Philipp Hagemeister 0ec40a3f16 remove 2012.12.11 from json (doesn't follow new naming scheme) 2013-01-03 18:53:29 +01:00
Philipp Hagemeister 077eb935fb Github seems to (wrongly) optimize HTML pages now, fix download page 2013-01-03 18:35:54 +01:00
Filippo Valsorda 2f0d49e70a release 2013.01.02 2013-01-02 22:47:06 +01:00
Filippo Valsorda a07549dc0f Updating docs 2012-12-31 18:44:31 +01:00
Filippo Valsorda 377689c9ee New download page template 2012-12-31 18:35:23 +01:00
Filippo Valsorda 38a4d6feac populated the .gitignore as we switch branches often; cover/ will be test_coverage/ 2012-12-31 18:10:52 +01:00
Filippo Valsorda 35a06ec0f4 undo 2012.12.27, never released to the public 2012-12-30 21:00:31 +01:00
Filippo Valsorda c8ff21bd82 moved docs and updates generation scripts from gh-pages branch to master 2012-12-30 20:12:14 +01:00
Filippo Valsorda 0e91665e1a New updates system and release 2012.12.27 2012-12-27 01:59:12 +01:00
Filippo Valsorda 480c972687 Test code coverage data in /cover/ 2012-12-18 12:44:56 +01:00
Philipp Hagemeister 642b5170e3 Update Python version in downloads 2012-12-16 13:43:29 +01:00
Philipp Hagemeister 6065e27a13 Add wget installation instructions 2012-12-16 13:39:45 +01:00
Philipp Hagemeister c860c18297 Build everything with Python 3 2012-12-16 13:33:56 +01:00
Philipp Hagemeister 2840ae3a8f Update homepage 2012-12-11 17:52:56 +01:00
Philipp Hagemeister b51355ef99 Credit @FiloSottile 2012-12-11 17:43:50 +01:00
Philipp Hagemeister 1762b604b1 new release 2012-12-11 17:34:56 +01:00
Philipp Hagemeister 8c64aa48d2 New generate-download for the new build mechanism 2012-12-11 17:34:33 +01:00
Philipp Hagemeister 8c3a45b6e4 Update supported extractors 2012-11-30 08:38:14 +01:00
Philipp Hagemeister dc9e2d8e0c Document CollegeHumor and arte.tv 2012-11-28 17:57:06 +01:00
Philipp Hagemeister e7d727efc2 Drop 2.5 support 2012-11-28 03:30:44 +01:00
Philipp Hagemeister a89c5d20c7 2012.11.29 2012-11-27 18:37:27 +01:00
Philipp Hagemeister 044cb16590 release 2012.11.28 2012-11-27 16:16:51 +01:00
Philipp Hagemeister 81db2d8040 Document --restrict-filenames, remove stitle 2012-11-27 16:06:59 +01:00
Philipp Hagemeister 2811949f4c Update doc for 2012.11.27 2012-11-27 00:26:05 +01:00
Philipp Hagemeister 3a2c78a2da Document configuration options 2012-11-26 23:23:33 +01:00
Philipp Hagemeister 23cf656bd1 Automatically update copyright (Closes #549) 2012-11-26 11:19:40 +01:00
Philipp Hagemeister 41ff5333f7 ignore temporary files 2012-11-26 11:18:31 +01:00
Philipp Hagemeister 571d094858 Clarify required Python version (#549) 2012-11-26 11:18:08 +01:00
Philipp Hagemeister 96679b26cf release 2012.11.17 2012-11-26 04:05:23 +01:00
Philipp Hagemeister b7d1a2247e Python 2.6 instead of 2.5 in about (Closes #516) 2012-11-07 01:49:35 +01:00
Philipp Hagemeister 9746c518a5 update docs 2012-10-10 19:33:08 +02:00
Philipp Hagemeister 959fa33ef9 release youtube-dl 2012.10.09 2012-10-10 19:27:50 +02:00
Philipp Hagemeister a44c76cdf4 release release 2012.09.27 2012-09-28 13:27:54 +02:00
Philipp Hagemeister 6a3ff26a01 Add recent breakages to FAQ (#433) 2012-09-27 23:29:40 +02:00
Philipp Hagemeister 60a1c34662 Drop 2.5 compatibility 2012-09-27 15:33:20 +02:00
Philipp Hagemeister 49bd2af54f Clarify 402 2012-02-28 02:00:00 +01:00
Philipp Hagemeister a5999b83fd release 2012.02.27 2012-02-27 20:20:08 +01:00
Philipp Hagemeister 82eedd7d83 update docs for 2012.02.26 2012-02-27 00:43:20 +01:00
Philipp Hagemeister 759320387a Use stitle instead of title in doc 2012-01-12 20:15:54 +01:00
Philipp Hagemeister 0be94d4c6a release 2012.01.08b docs 2012-01-08 17:24:46 +01:00
Philipp Hagemeister 6bd03617e6 docs for 2012.01.05 2012-01-05 11:10:14 +01:00
Philipp Hagemeister a3d06cfe6d doc for 2011.12.18 2011-12-17 01:35:45 +01:00
Philipp Hagemeister cd891850ec Release 2011.12.15 2011-12-15 20:33:49 +01:00
Philipp Hagemeister cdee681a2c Release 2011.12.08 2011-12-08 21:41:43 +01:00
Philipp Hagemeister db2c6281eb Release 2011.11.23: Update webpage 2011-11-23 10:39:50 +01:00
Philipp Hagemeister 06a129c9dd Release 2011.11.22 2011-11-22 15:39:04 +01:00
Philipp Hagemeister 5a938b5d04 +Makefile 2011-11-22 15:38:32 +01:00
Philipp Hagemeister b0e67d2404 Release 2011.11.21 2011-11-21 21:54:04 +01:00
Philipp Hagemeister 92961845c3 docs for 2011.10.19 2011-10-19 00:40:55 +02:00
Philipp Hagemeister f9c3398449 release 2011.09.30 2011-09-30 09:08:53 +02:00
Philipp Hagemeister bffe76ba4b Remove empty <tr>s in index 2011-09-28 09:55:54 +02:00
Philipp Hagemeister 0d29baa7c6 Make documentation valid HTML 2011-09-28 09:53:50 +02:00
Philipp Hagemeister dd9d3efcfb Make FAQ valid HTML 2011-09-28 09:52:01 +02:00
Philipp Hagemeister 9014805f26 Fix stray </a> in FAQ 2011-09-28 09:50:04 +02:00
Philipp Hagemeister f973da57a4 Switch to HTML5 doctype 2011-09-28 09:49:19 +02:00
Philipp Hagemeister a073eb792f Update documentation and FAQ 2011-09-28 09:47:54 +02:00
Philipp Hagemeister f202278fe1 Close <ul> in first FAQ question 2011-09-28 09:29:19 +02:00
Philipp Hagemeister 3328f34eb3 Update homepage for 2011.09.27 2011-09-27 21:44:59 +02:00
Philipp Hagemeister 50d7a239ae Raise correct args in exception 2011-09-27 21:44:02 +02:00
Philipp Hagemeister 81eb98e4bd Update docs 2011-09-27 21:17:50 +02:00
Philipp Hagemeister 78ef538449 Updated bug reporting instructions in the FAQ 2011-09-27 20:58:37 +02:00
Philipp Hagemeister 6e3ed6385c Download regeneration works with python 2.5 2011-09-27 20:50:03 +02:00
Ricardo Garcia a2dafa7316 Add creative commons license to webpages 2011-09-27 20:17:34 +02:00
Philipp Hagemeister 86e5e08789 release 2011.09.18c 2011-09-17 00:59:45 +02:00
Philipp Hagemeister e397cf5b47 2011.09.18b 2011-09-16 22:33:54 +02:00
Philipp Hagemeister 3fd01c88c5 Update documentation for 2011.09.18 2011-09-15 19:29:53 +02:00
Philipp Hagemeister d2cbb552b7 Update documentation for 2011.09.17 2011-09-15 19:25:07 +02:00
Philipp Hagemeister 9c358a8982 Release 2011.09.16 2011-09-15 18:49:20 +02:00
Philipp Hagemeister 7923514ea0 Release 2011.09.15 2011-09-14 22:56:50 +02:00
Philipp Hagemeister 004759c8f2 Add a prominent Develop link in the documentation (Closes #155) 2011-09-14 00:42:45 +02:00
Philipp Hagemeister 95f029a5d8 Release 2011.09.14 2011-09-14 00:02:16 +02:00
Philipp Hagemeister ac47cfe369 Update generate-download 2011-09-13 23:06:09 +02:00
Philipp Hagemeister 463fe92d26 Release new version 2011-09-13 23:05:58 +02:00
Ricardo Garcia ba8a90b0bf Regenerate download page 2011-08-04 19:16:25 +02:00
Ricardo Garcia 3a6c68881a Regenerate download page 2011-03-29 20:33:22 +02:00
Ricardo Garcia a290358c2f Regenerate download page 2011-02-26 00:48:41 +01:00
Ricardo Garcia 17faaa4548 Regenerate download page 2011-02-25 21:54:55 +01:00
Ricardo Garcia 3ea19089f9 Regenerate download page 2011-02-25 20:13:32 +01:00
Ricardo Garcia 9077650490 Add Gergely Imreh to the author list 2011-02-20 18:03:52 +01:00
Ricardo Garcia 2ecbfb4cbc Properly update Python version requirement in the documentation 2011-01-30 13:14:45 +01:00
Ricardo Garcia 3519974510 Regenerate download page 2011-01-30 13:12:45 +01:00
Ricardo Garcia e76e4dc8b1 Add Paweł Paprota to the list of authors in the documentation 2011-01-30 13:12:10 +01:00
Ricardo Garcia 01e013b5e3 Mention support for YouTube.com user videos in the documentation 2011-01-30 13:10:22 +01:00
Ricardo Garcia 64ea994344 Update documentation to reflect Python 2.5 requirement 2011-01-30 12:56:52 +01:00
Ricardo Garcia a3e9208c01 Add Witold Baryluk to the author list 2011-01-21 18:19:43 +01:00
Ricardo Garcia ec0d2e46c5 Add link to the project page from the about page 2011-01-07 10:43:13 +01:00
Ricardo Garcia 1b3dbefb04 Modify pages increasing the copyright year 2011-01-03 10:54:47 +01:00
Ricardo Garcia c3003bb4db Add question about option -g to the FAQ 2011-01-03 10:54:07 +01:00
Ricardo Garcia 6c7d5d8d14 Regenerate download page 2010-12-09 19:58:30 +01:00
Ricardo Garcia 35c0ad4587 Regenerate download page 2010-12-09 19:38:15 +01:00
Ricardo Garcia 941383109a Document new DepositFiles InfoExtractor 2010-12-08 10:59:59 +01:00
Ricardo Garcia c95ac4f062 Regenerate download page 2010-11-19 19:42:22 +01:00
Ricardo Garcia 0732b1f39f Document change from "ord" to "autonumber" in template 2010-11-19 19:38:36 +01:00
Ricardo Garcia 8d78373ea0 Document new upload_date template parameter 2010-11-19 19:38:07 +01:00
Ricardo Garcia 17082ad9db Put the main buttons ordered vertically 2010-11-19 18:24:03 +01:00
Ricardo Garcia 725d763c48 Reorganize website 2010-11-19 18:19:41 +01:00
Ricardo Garcia a993428ac9 Document rtmpdump's optional requirement in the main page 2010-11-06 18:46:50 +01:00
Ricardo Garcia 7136abc8f3 Improve documentation on "SyntaxError" to take Python 3.x into account 2010-11-06 18:45:10 +01:00
Ricardo Garcia 790622091e Renamed "generate-home" to "generate-index" for consistency 2010-11-06 11:29:10 +01:00
Ricardo Garcia db03ff92a6 Changed a few style aspects to make the page look better under IE 2010-11-06 11:29:10 +01:00
Ricardo Garcia b294d9fa38 Use SSL in URLs where possible 2010-11-06 11:28:56 +01:00
Ricardo Garcia 5de4c645e2 Make web pages valid HTML and remove a few glitches 2010-11-02 21:28:56 +01:00
Ricardo Garcia 05703c8f9c Minor changes to stylesheet 2010-11-01 11:28:23 +01:00
Ricardo Garcia f37b138766 Add copyright notice at the bottom of every page 2010-11-01 08:23:57 +01:00
Ricardo Garcia 5c7359365f Initial pages commit, using the wiki contents 2010-10-31 23:50:48 +01:00
235 changed files with 30107 additions and 21730 deletions
+3 -11
View File
@@ -1,6 +1,7 @@
updates_key.pem
*~
*.pyc
*.pyo
*~
*.DS_Store
wine-py2exe/
py2exe.log
@@ -16,13 +17,4 @@ youtube-dl.exe
youtube-dl.tar.gz
.coverage
cover/
updates_key.pem
*.egg-info
*.srt
*.sbv
*.vtt
*.flv
*.mp4
*.part
test/testdata
.tox
youtube_dl.egg-info/
-19
View File
@@ -1,19 +0,0 @@
language: python
python:
- "2.6"
- "2.7"
- "3.3"
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq rtmpdump
script: nosetests test --verbose
notifications:
email:
- filippo.valsorda@gmail.com
- phihag@phihag.de
- jaime.marquinez.ferrandiz+travis@gmail.com
- yasoob.khld@gmail.com
# irc:
# channels:
# - "irc.freenode.org#youtube-dl"
# skip_join: true
-14
View File
@@ -1,14 +0,0 @@
2013.01.02 Codename: GIULIA
* Add support for ComedyCentral clips <nto>
* Corrected Vimeo description fetching <Nick Daniels>
* Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
* --verbose offers more environment info
* New info_dict field: uploader_id
* New updates system, with signature checking
* New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
* Fixed IEs: BlipTv
* Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
* Simplified IEs and test code
* Various (Python 3 and other) fixes
* Revamped and expanded tests
-1
View File
@@ -1 +0,0 @@
2012.12.99
-24
View File
@@ -1,24 +0,0 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
-5
View File
@@ -1,5 +0,0 @@
include README.md
include test/*.py
include test/*.json
include youtube-dl.bash-completion
include youtube-dl.1
-79
View File
@@ -1,79 +0,0 @@
# Default target: build the executable and all documentation artifacts.
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

# Remove generated docs, build products and coverage output.
clean:
	rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz

# Like clean, but also remove the built executables.
cleanall: clean
	rm -f youtube-dl youtube-dl.exe

PREFIX=/usr/local
BINDIR=$(PREFIX)/bin
MANDIR=$(PREFIX)/man
PYTHON=/usr/bin/env python

# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
ifeq ($(PREFIX),/usr)
SYSCONFDIR=/etc
else
ifeq ($(PREFIX),/usr/local)
SYSCONFDIR=/etc
else
SYSCONFDIR=$(PREFIX)/etc
endif
endif

# Install the program, its man page and the bash completion snippet.
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
	install -d $(DESTDIR)$(BINDIR)
	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
	install -d $(DESTDIR)$(MANDIR)/man1
	install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl

# Run the test suite with nose.
test:
	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
	nosetests --verbose test

tar: youtube-dl.tar.gz

.PHONY: all clean install test tar bash-completion pypi-files

# Artifacts needed for a PyPI release.
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1

# Self-contained executable: a zip of the package with a python shebang
# line prepended, made executable.
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
	echo '#!$(PYTHON)' > youtube-dl
	cat youtube-dl.zip >> youtube-dl
	rm youtube-dl.zip
	chmod a+x youtube-dl

# Regenerate the README from the program's --help output.
README.md: youtube_dl/*.py youtube_dl/*/*.py
	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

README.txt: README.md
	pandoc -f markdown -t plain README.md -o README.txt

# Man page, generated from the README via pandoc.
youtube-dl.1: README.md
	pandoc -s -f markdown -t man README.md -o youtube-dl.1

youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
	python devscripts/bash-completion.py

bash-completion: youtube-dl.bash-completion

# Source tarball for distribution.
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
		--exclude '*.DS_Store' \
		--exclude '*.kate-swp' \
		--exclude '*.pyc' \
		--exclude '*.pyo' \
		--exclude '*~' \
		--exclude '__pycache' \
		--exclude '.git' \
		--exclude 'testdata' \
		-- \
		bin devscripts test youtube_dl \
		CHANGELOG LICENSE README.md README.txt \
		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
		youtube-dl
-285
View File
@@ -1,285 +0,0 @@
% YOUTUBE-DL(1)
# NAME
youtube-dl - download videos from youtube.com or other video platforms
# SYNOPSIS
**youtube-dl** [OPTIONS] URL [URL...]
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
YouTube.com and a few more sites. It requires the Python interpreter, version
2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help print this help text and exit
--version print program version and exit
-U, --update update this program to latest version. Make sure
that you have sufficient permissions (run with
sudo if needed)
-i, --ignore-errors continue on download errors, for example to
skip unavailable videos in a playlist
--abort-on-error Abort downloading of further videos (in the
playlist or the command line) if an error occurs
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
--referer REF specify a custom referer, use if the video access
is restricted to one domain
--list-extractors List all supported extractors and the URLs they
would handle
--extractor-descriptions Output descriptions of all supported extractors
--proxy URL Use the specified HTTP/HTTPS proxy
--no-check-certificate Suppress HTTPS certificate validation.
--cache-dir DIR Location in the filesystem where youtube-dl can
store downloaded information permanently. By
default $XDG_CACHE_HOME/youtube-dl or ~/.cache
/youtube-dl .
--no-cache-dir Disable filesystem caching
## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
--playlist-end NUMBER playlist video to end at (default is last)
--match-title REGEX download only matching titles (regex or caseless
sub-string)
--reject-title REGEX skip download for matching titles (regex or
caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
--min-filesize SIZE Do not download any videos smaller than SIZE
(e.g. 50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
50k or 44.6m)
--date DATE download only videos uploaded in this date
--datebefore DATE download only videos uploaded before this date
--dateafter DATE download only videos uploaded after this date
--no-playlist download only the currently playing video
--age-limit YEARS download only videos suitable for the given age
--download-archive FILE Download only videos not present in the archive
file. Record all downloaded videos in it.
## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second (e.g.
50K or 4.2M)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
(default is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.
## Filesystem Options:
-t, --title use title in file name (default)
--id use only video ID in file name
-l, --literal [deprecated] alias of --title
-A, --auto-number number downloaded files starting from 00000
-o, --output TEMPLATE output filename template. Use %(title)s to get
the title, %(uploader)s for the uploader name,
%(uploader_id)s for the uploader nickname if
different, %(autonumber)s to get an automatically
incremented number, %(ext)s for the filename
extension, %(format)s for the format description
(like "22 - 1280x720" or "HD"), %(format_id)s for
the unique id of the format (like Youtube's
itags: "137"), %(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the video id
, %(playlist)s for the playlist the video is in,
%(playlist_index)s for the position in the
playlist and %% for a literal percent. Use - to
output to stdout. Can also be used to download to
a different directory, for example with -o '/my/d
ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
when it is present in output filename template or
--auto-number option is given
--restrict-filenames Restrict filenames to only ASCII characters, and
avoid "&" and spaces in filenames
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
-w, --no-overwrites do not overwrite files
-c, --continue force resume of partially downloaded files. By
default, youtube-dl will resume downloads if
possible.
--no-continue do not resume partially downloaded files (restart
from beginning)
--cookies FILE file to read cookies from and dump cookie jar in
--no-part do not use .part files
--no-mtime do not use the Last-modified header to set the
file modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
--write-annotations write video annotations to a .annotation file
--write-thumbnail write thumbnail image to disk
## Verbosity / Simulation Options:
-q, --quiet activates quiet mode
-s, --simulate do not download the video and do not write
anything to disk
--skip-download do not download the video
-g, --get-url simulate, quiet but print URL
-e, --get-title simulate, quiet but print title
--get-id simulate, quiet but print id
--get-thumbnail simulate, quiet but print thumbnail URL
--get-description simulate, quiet but print video description
--get-filename simulate, quiet but print output filename
--get-format simulate, quiet but print output format
-j, --dump-json simulate, quiet but print JSON information
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
-v, --verbose print various debugging information
--dump-intermediate-pages print downloaded pages to debug problems (very
verbose)
--write-pages Write downloaded pages to files in the current
directory
## Video Format Options:
-f, --format FORMAT video format code, specify the order of
preference using slashes: "-f 22/17/18". "-f mp4"
and "-f flv" are also supported
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific one
is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats (currently youtube
only)
## Subtitle Options:
--write-sub write subtitle file
--write-auto-sub write automatic subtitle file (youtube only)
--all-subs downloads all the available subtitles of the
video
--list-subs lists all available subtitles for the video
--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt] youtube
only)
--sub-lang LANGS languages of the subtitles to download (optional)
separated by commas, use IETF language tags like
'en,pt'
## Authentication Options:
-u, --username USERNAME account username
-p, --password PASSWORD account password
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo only)
## Post-processing Options:
-x, --extract-audio convert video files to audio-only files (requires
ffmpeg or avconv and ffprobe or avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
"wav"; best by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification, insert
a value between 0 (better) and 9 (worse) for VBR
or a specific bitrate like 128K (default 5)
--recode-video FORMAT Encode the video to another format if necessary
(currently supported: mp4|flv|ogg|webm)
-k, --keep-video keeps the video file on disk after the post-
processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-
processed files are overwritten by default
--embed-subs embed subtitles in the video (only for mp4
videos)
--add-metadata add metadata to the files
# CONFIGURATION
You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`.
# OUTPUT TEMPLATE
The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
- `id`: The sequence will be replaced by the video identifier.
- `url`: The sequence will be replaced by the video URL.
- `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
- `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
- `title`: The sequence will be replaced by the video title.
- `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- `playlist`: The name or the id of the playlist that contains the video.
- `playlist_index`: The index of the video in the playlist, a five-digit number.
The current default template is `%(title)s-%(id)s.%(ext)s`.
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
# VIDEO SELECTION
Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`, they accept dates in two formats:
- Absolute dates: Dates in the format `YYYYMMDD`.
- Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
Examples:
$ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
$ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970
$ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
# FAQ
### Can you please put the -b option back?
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
### I get HTTP error 402 when trying to download a video. What's this?
Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restarting youtube-dl.
### I have downloaded a video but how can I play it?
Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).
### The links provided by youtube-dl -g are not working anymore
The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.
### ERROR: no fmt_url_map or conn information found in video info
youtube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
### ERROR: unable to download video ###
youtube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
### SyntaxError: Non-ASCII character ###
The error
File "youtube-dl", line 2
SyntaxError: Non-ASCII character '\x93' ...
means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.
### What is this binary file? Where has the code gone?
Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
### The exe throws a *Runtime error from Visual C++*
To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29).
# COPYRIGHT
youtube-dl is released into the public domain by the copyright holders.
This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
# BUGS
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>
Please include:
* Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
* If possible re-run the command with `--verbose`, and include the full output, it is really helpful to us.
* The output of `youtube-dl --version`
* The output of `python --version`
* The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).
For discussions, join us in the irc channel #youtube-dl on freenode.
+50
View File
@@ -0,0 +1,50 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>About youtube-dl</title>
<link rel="stylesheet" href="style.css" type="text/css">
</head>
<body>
<table class="heading"><tr>
<td class="title"><a href="index.html">youtube-dl</a></td>
<td class="subtitle">About the program</td>
</tr></table>
<h1>What is it?</h1>
<p><em>youtube-dl</em> is a command-line program to download videos from YouTube.com and a few <a href="supportedsites.html" style="color: blue; text-decoration: underline;">more sites</a>. It requires the <a href="http://www.python.org/">Python interpreter</a>, version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on Mac OS X. It is released to the public domain, which means you can modify it, redistribute it or use it however you like. The project is currently being developed at <a href="https://github.com/ytdl-org/youtube-dl/"><strong>GitHub</strong></a>.</p>
<h1>Authors</h1>
<p>Core team:</p>
<ul>
<li><a href="https://github.com/remitamine">Remita Amine</a>: core developer.</li>
</ul>
<p>Core team (inactive):</p>
<ul>
<li><a href="https://github.com/rg3">Ricardo Garcia Gonzalez</a>: original author, program core.</li>
<li><a href="https://github.com/phihag">Philipp Hagemeister</a>: core developer, maintainer 2011-2016.</li>
<li><a href="https://github.com/FiloSottile">Filippo Valsorda</a>: core developer, Windows build, testing.</li>
<li><a href="https://github.com/jaimeMF">Jaime Marquínez Ferrándiz</a>: core developer.</li>
<li><a href="https://github.com/yan12125">Yen Chi Hsuan</a>: core developer.</li>
<li><a href="https://github.com/naglis">Naglis Jonaitis</a>: core developer.</li>
<li><a href="https://github.com/pulpe">pulpe</a>: core developer.</li>
<li><a href="https://github.com/dstftw">Sergey M.</a>: core developer, maintainer 2016-2021.</li>
</ul>
<p>Many <a href="https://github.com/ytdl-org/youtube-dl/blob/master/AUTHORS">other people</a> contributing patches, code, ideas and kind messages. <a href="https://github.com/ytdl-org/youtube-dl/graphs/contributors">Too many</a> to be listed here. You know who you are. Thank you very much.</p>
<div class="note">
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">
<img alt="Creative Commons License" style="border-width:0"
src="https://i.creativecommons.org/l/by-sa/3.0/80x15.png" /></a><br />
Copyright © 2006-2011 Ricardo Garcia Gonzalez<br />
Copyright © 2011-2021 youtube-dl developers
</div>
</body>
</html>
-6
View File
@@ -1,6 +0,0 @@
#!/usr/bin/env python
# Thin executable wrapper: delegate to the youtube_dl package's CLI entry point.
import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()
Binary file not shown.
Binary file not shown.
-18
View File
@@ -1,18 +0,0 @@
# Bash completion for youtube-dl.  The {{flags}} placeholder is replaced
# with the full option list by devscripts/bash-completion.py.
__youtube_dl()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    opts="{{flags}}"
    # Special ":yt..." keyword pseudo-URLs understood by youtube-dl.
    keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater"
    if [[ ${cur} =~ : ]]; then
        # Current word contains a colon: offer the keyword pseudo-URLs.
        COMPREPLY=( $(compgen -W "${keywords}" -- ${cur}) )
        return 0
    elif [[ ${cur} == * ]] ; then
        # Otherwise complete from the program's command-line options.
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi
}
complete -F __youtube_dl youtube-dl
-26
View File
@@ -1,26 +0,0 @@
#!/usr/bin/env python
"""Generate the youtube-dl bash completion script.

Fills the {{flags}} placeholder in devscripts/bash-completion.in with every
option string known to youtube-dl's option parser and writes the result to
youtube-dl.bash-completion.
"""
import os
from os.path import dirname as dirn
import sys

# Make the youtube_dl package importable when run from the repo checkout.
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl

BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"


def build_completion(opt_parser):
    """Write the completion file from *opt_parser*'s option strings."""
    opts_flag = []
    for group in opt_parser.option_groups:
        for option in group.option_list:
            # for every long flag
            opts_flag.append(option.get_opt_string())
    with open(BASH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    with open(BASH_COMPLETION_FILE, "w") as f:
        # just using the special char
        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
        f.write(filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)
-405
View File
@@ -1,405 +0,0 @@
#!/usr/bin/python3
import argparse
import ctypes
import functools
import os.path
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import urllib.parse as urlparse
import winreg as _winreg

from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
class BuildHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in its own thread."""
    allow_reuse_address = True
# Handle to the Windows advapi32 DLL (service control manager API).
advapi32 = ctypes.windll.advapi32

# Win32 service API constants (values from the Windows SDK winsvc.h).
SC_MANAGER_ALL_ACCESS = 0xf003f
SC_MANAGER_CREATE_SERVICE = 0x02
SERVICE_WIN32_OWN_PROCESS = 0x10
SERVICE_AUTO_START = 0x2
SERVICE_ERROR_NORMAL = 0x1
DELETE = 0x00010000
SERVICE_STATUS_START_PENDING = 0x00000002
SERVICE_STATUS_RUNNING = 0x00000004
SERVICE_ACCEPT_STOP = 0x1

# Name under which the build server is registered as a Windows service.
SVCNAME = 'youtubedl_builder'

LPTSTR = ctypes.c_wchar_p
# Prototype of the ServiceMain callback handed to the service dispatcher.
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))


class SERVICE_TABLE_ENTRY(ctypes.Structure):
    # Mirrors the Win32 SERVICE_TABLE_ENTRY structure.
    _fields_ = [
        ('lpServiceName', LPTSTR),
        ('lpServiceProc', START_CALLBACK)
    ]


# Prototype of the HandlerEx service control callback.
HandlerEx = ctypes.WINFUNCTYPE(
    ctypes.c_int,     # return
    ctypes.c_int,     # dwControl
    ctypes.c_int,     # dwEventType
    ctypes.c_void_p,  # lpEventData,
    ctypes.c_void_p,  # lpContext,
)
def _ctypes_array(c_type, py_array):
ar = (c_type * len(py_array))()
ar[:] = py_array
return ar
def win_OpenSCManager():
    """Open the Windows service control manager with full access.

    Returns the SC manager handle; raises if the call fails (typically
    because the process lacks administrator rights).
    """
    res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
    if not res:
        raise Exception('Opening service manager failed - '
                        'are you running this as administrator?')
    return res
def win_install_service(service_name, cmdline):
    """Register *cmdline* as an auto-starting Windows service *service_name*."""
    manager = win_OpenSCManager()
    try:
        h = advapi32.CreateServiceW(
            manager, service_name, None,
            SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
            SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
            cmdline, None, None, None, None, None)
        if not h:
            raise OSError('Service creation failed: %s' % ctypes.FormatError())
        advapi32.CloseServiceHandle(h)
    finally:
        # Always release the SC manager handle.
        advapi32.CloseServiceHandle(manager)
def win_uninstall_service(service_name):
    """Delete the Windows service *service_name* via the SC manager."""
    manager = win_OpenSCManager()
    try:
        h = advapi32.OpenServiceW(manager, service_name, DELETE)
        if not h:
            raise OSError('Could not find service %s: %s' % (
                service_name, ctypes.FormatError()))
        try:
            if not advapi32.DeleteService(h):
                raise OSError('Deletion failed: %s' % ctypes.FormatError())
        finally:
            advapi32.CloseServiceHandle(h)
    finally:
        advapi32.CloseServiceHandle(manager)
def win_service_report_event(service_name, msg, is_error=True):
    """Write *msg* to a debug log file and to the Windows event log.

    NOTE(review): the hard-coded log path 'C:/sshkeys/log' looks
    machine-specific - confirm it exists on the build host.
    """
    with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
        f.write(msg + '\n')
    event_log = advapi32.RegisterEventSourceW(None, service_name)
    if not event_log:
        raise OSError('Could not report event: %s' % ctypes.FormatError())
    try:
        # EVENTLOG_ERROR_TYPE vs EVENTLOG_INFORMATION_TYPE.
        type_id = 0x0001 if is_error else 0x0004
        event_id = 0xc0000000 if is_error else 0x40000000
        lines = _ctypes_array(LPTSTR, [msg])
        if not advapi32.ReportEventW(
                event_log, type_id, 0, event_id, None, len(lines), 0,
                lines, None):
            raise OSError('Event reporting failed: %s' % ctypes.FormatError())
    finally:
        advapi32.DeregisterEventSource(event_log)
def win_service_handler(stop_event, *args):
    """Service control handler callback (unfinished).

    NOTE(review): this handler is incomplete - it unconditionally raises,
    the bare `TODO` below is a placeholder, and `service_name` is not
    defined in this scope (would raise NameError in the except branch).
    Confirm against upstream before relying on it.
    """
    try:
        raise ValueError('Handler called with args ' + repr(args))
        TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise
def win_service_set_status(handle, status_code):
    """Report *status_code* for the service identified by *handle*.

    NOTE(review): SERVICE_STATUS is not defined anywhere in this file, so
    calling this would raise NameError - confirm against upstream.
    """
    svcStatus = SERVICE_STATUS()
    svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
    svcStatus.dwCurrentState = status_code
    svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP
    svcStatus.dwServiceSpecificExitCode = 0
    if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
        raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())
def win_service_main(service_name, real_main, argc, argv_raw):
    """ServiceMain entry point invoked by the Windows service dispatcher.

    Registers the service control handler; any failure is logged to the
    event log and re-raised.
    """
    try:
        # args = [argv_raw[i].value for i in range(argc)]
        stop_event = threading.Event()
        # BUG FIX: functools.partial takes the callable first; the original
        # built partial(stop_event, win_service_handler), which would raise
        # TypeError ("Event object is not callable") when invoked.
        handler = HandlerEx(functools.partial(win_service_handler, stop_event))
        h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
        if not h:
            raise OSError('Handler registration failed: %s' %
                          ctypes.FormatError())
        TODO
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise
def win_service_start(service_name, real_main):
    """Connect this process to the service control dispatcher."""
    try:
        cb = START_CALLBACK(
            functools.partial(win_service_main, service_name, real_main))
        dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
            SERVICE_TABLE_ENTRY(
                service_name,
                cb
            ),
            # The dispatch table is terminated by a NULL entry.
            SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
        ])
        if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
            raise OSError('ctypes start failed: %s' % ctypes.FormatError())
    except Exception as e:
        tb = traceback.format_exc()
        msg = str(e) + '\n' + tb
        win_service_report_event(service_name, msg, is_error=True)
        raise
def main(args=None):
    """Command-line entry point.

    Installs/uninstalls/runs the Windows service, or - by default - serves
    the build HTTP API on the given host:port until ENTER is pressed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--install',
                        action='store_const', dest='action', const='install',
                        help='Launch at Windows startup')
    parser.add_argument('-u', '--uninstall',
                        action='store_const', dest='action', const='uninstall',
                        help='Remove Windows service')
    parser.add_argument('-s', '--service',
                        action='store_const', dest='action', const='service',
                        help='Run as a Windows service')
    # BUG FIX: argparse interpolates '%(default)s' in help strings, not
    # optparse's '%default' - the original raised ValueError on --help.
    parser.add_argument('-b', '--bind', metavar='<host:port>',
                        action='store', default='localhost:8142',
                        help='Bind to host:port (default %(default)s)')
    options = parser.parse_args(args=args)

    if options.action == 'install':
        # Translate the VirtualBox shared-folder drive letter to a UNC path
        # so the service can locate this script.
        fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
        cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
        win_install_service(SVCNAME, cmdline)
        return

    if options.action == 'uninstall':
        win_uninstall_service(SVCNAME)
        return

    if options.action == 'service':
        win_service_start(SVCNAME, main)
        return

    host, port_str = options.bind.split(':')
    port = int(port_str)
    print('Listening on %s:%d' % (host, port))
    srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
    thr = threading.Thread(target=srv.serve_forever)
    thr.start()
    input('Press ENTER to shut down')
    srv.shutdown()
    thr.join()
def rmtree(path):
    """Recursively delete *path*, forcing each file writable first."""
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if not os.path.isdir(full):
            # Clear any read-only bit before unlinking.
            os.chmod(full, 0o666)
            os.remove(full)
        else:
            rmtree(full)
    os.rmdir(path)
#==============================================================================
class BuildError(Exception):
    """Failure during a build step; carries an HTTP status code."""

    def __init__(self, output, code=500):
        # Message shown to the client and status code for the response.
        self.code = code
        self.output = output

    def __str__(self):
        return self.output
class HTTPError(BuildError):
    """BuildError reported to the client as a plain HTTP status response."""
    pass
class PythonBuilder(object):
    """Mixin that locates the requested Python install from the registry.

    NOTE(review): uses the Python 2 `_winreg` name; on Python 3 the module
    is `winreg` - confirm an alias is in place before running this.
    """

    def __init__(self, **kwargs):
        pythonVersion = kwargs.pop('python', '2.7')
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore\%s\InstallPath' % pythonVersion)
            try:
                self.pythonPath, _ = _winreg.QueryValueEx(key, '')
            finally:
                _winreg.CloseKey(key)
        except Exception:
            raise BuildError('No such Python version: %s' % pythonVersion)
        super(PythonBuilder, self).__init__(**kwargs)
class GITInfoBuilder(object):
    """Mixin that parses user/repo/revision and prepares a temp build dir."""

    def __init__(self, **kwargs):
        try:
            # URL path is expected to start with <user>/<repo>.
            self.user, self.repoName = kwargs['path'][:2]
            self.rev = kwargs.pop('rev')
        except ValueError:
            raise BuildError('Invalid path')
        except KeyError as e:
            raise BuildError('Missing mandatory parameter "%s"' % e.args[0])
        path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
        if not os.path.exists(path):
            os.makedirs(path)
        # basePath is a fresh temp dir; the checkout goes into its 'build'.
        self.basePath = tempfile.mkdtemp(dir=path)
        self.buildPath = os.path.join(self.basePath, 'build')
        super(GITInfoBuilder, self).__init__(**kwargs)
class GITBuilder(GITInfoBuilder):
    """Mixin that clones the repository and checks out the requested rev."""

    def build(self):
        try:
            subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
            subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)
        super(GITBuilder, self).build()
class YoutubeDLBuilder(object):
    """Mixin that validates the request and runs the py2exe build."""

    # Only these GitHub users may trigger builds.
    authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile']

    def __init__(self, **kwargs):
        if self.repoName != 'youtube-dl':
            raise BuildError('Invalid repository "%s"' % self.repoName)
        if self.user not in self.authorizedUsers:
            raise HTTPError('Unauthorized user "%s"' % self.user, 401)
        super(YoutubeDLBuilder, self).__init__(**kwargs)

    def build(self):
        try:
            subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
                                    cwd=self.buildPath)
        except subprocess.CalledProcessError as e:
            raise BuildError(e.output)
        super(YoutubeDLBuilder, self).build()
class DownloadBuilder(object):
    """Mixin that streams a file from the build tree to the HTTP client."""

    def __init__(self, **kwargs):
        self.handler = kwargs.pop('handler')
        self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
        self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
        # Reject paths escaping the build directory (directory traversal).
        if not self.srcPath.startswith(self.buildPath):
            raise HTTPError(self.srcPath, 401)
        super(DownloadBuilder, self).__init__(**kwargs)

    def build(self):
        if not os.path.exists(self.srcPath):
            raise HTTPError('No such file', 404)
        if os.path.isdir(self.srcPath):
            raise HTTPError('Is a directory: %s' % self.srcPath, 401)
        self.handler.send_response(200)
        self.handler.send_header('Content-Type', 'application/octet-stream')
        self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
        self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
        self.handler.end_headers()
        with open(self.srcPath, 'rb') as src:
            shutil.copyfileobj(src, self.handler.wfile)
        super(DownloadBuilder, self).build()
class CleanupTempDir(object):
    """Mixin that removes the temporary build directory, best-effort."""

    def build(self):
        try:
            rmtree(self.basePath)
        except Exception as e:
            # Cleanup failure must not fail the build response.
            print('WARNING deleting "%s": %s' % (self.basePath, e))
        super(CleanupTempDir, self).build()
class Null(object):
    """Terminal mixin: no-op endpoints for the cooperative super() chain."""

    def __init__(self, **kwargs):
        pass

    def start(self):
        pass

    def close(self):
        pass

    def build(self):
        pass
# Concrete builder assembled from the mixins; each step's __init__/build
# cooperates via super(), terminating at Null.
class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
    pass
class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
    """Dispatches /build/... and /download/... requests to a Builder."""

    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.

    def do_GET(self):
        path = urlparse.urlparse(self.path)
        paramDict = dict([(key, value[0]) for key, value in urlparse.parse_qs(path.query).items()])
        action, _, path = path.path.strip('/').partition('/')
        if path:
            path = path.split('/')
            if action in self.actionDict:
                try:
                    builder = self.actionDict[action](path=path, handler=self, **paramDict)
                    builder.start()
                    try:
                        builder.build()
                    finally:
                        builder.close()
                # BUG FIX: HTTPError subclasses BuildError, so it must be
                # caught first; the original listed BuildError first, making
                # the HTTPError clause unreachable dead code.
                except HTTPError as e:
                    self.send_response(e.code, str(e))
                except BuildError as e:
                    self.send_response(e.code)
                    # BUG FIX: Python 3 has no unicode(); use str().  Header
                    # values are sent as strings, so stringify the length.
                    msg = str(e).encode('UTF-8')
                    self.send_header('Content-Type', 'text/plain; charset=UTF-8')
                    self.send_header('Content-Length', str(len(msg)))
                    self.end_headers()
                    self.wfile.write(msg)
            else:
                self.send_response(500, 'Unknown build method "%s"' % action)
        else:
            self.send_response(500, 'Malformed URL')
#==============================================================================
# Script entry point: parse CLI arguments and run the chosen action.
if __name__ == '__main__':
    main()
-39
View File
@@ -1,39 +0,0 @@
#!/usr/bin/env python

"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
if we are not 'age_limit' tagging some porn site
"""

# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import get_testcases
from youtube_dl.utils import compat_urllib_request

for test in get_testcases():
    try:
        webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, making the script unabortable.
        print('\nFail: {0}'.format(test['name']))
        continue

    webpage = webpage.decode('utf8', 'replace')

    # Pages mentioning 'porn' must be tagged age_limit == 18 ...
    if 'porn' in webpage.lower() and ('info_dict' not in test
                                      or 'age_limit' not in test['info_dict']
                                      or test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))
    # ... and pages that don't must not be.
    elif 'porn' not in webpage.lower() and ('info_dict' in test and
                                            'age_limit' in test['info_dict'] and
                                            test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))
    else:
        sys.stdout.write('.')
        sys.stdout.flush()

print()
-42
View File
@@ -1,42 +0,0 @@
#!/usr/bin/env python3
"""Record a new release in update/versions.json.

Usage: add-version.py VERSION
Reads the built artifacts from ../../build/VERSION, computes their SHA-256
checksums and registers them (with download URLs) as the latest version.
"""
import json
import sys
import hashlib
import os.path


if len(sys.argv) <= 1:
    print('Specify the version number as parameter')
    sys.exit(1)  # BUG FIX: exit non-zero on usage error (was exit code 0)
version = sys.argv[1]

with open('update/LATEST_VERSION', 'w') as f:
    f.write(version)

# BUG FIX: read versions.json via a context manager instead of leaking the
# file handle from json.load(open(...)).
with open('update/versions.json') as f:
    versions_info = json.load(f)
# Drop any stale signature; it is recomputed by sign-versions.py.
if 'signature' in versions_info:
    del versions_info['signature']

new_version = {}

filenames = {
    'bin': 'youtube-dl',
    'exe': 'youtube-dl.exe',
    'tar': 'youtube-dl-%s.tar.gz' % version}
build_dir = os.path.join('..', '..', 'build', version)
for key, filename in filenames.items():
    url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename)
    fn = os.path.join(build_dir, filename)
    with open(fn, 'rb') as f:
        data = f.read()
    if not data:
        raise ValueError('File %s is empty!' % fn)
    sha256sum = hashlib.sha256(data).hexdigest()
    new_version[key] = (url, sha256sum)

versions_info['versions'][version] = new_version
versions_info['latest'] = version

with open('update/versions.json', 'w') as jsonf:
    json.dump(versions_info, jsonf, indent=4, sort_keys=True)
-32
View File
@@ -1,32 +0,0 @@
#!/usr/bin/env python3
"""Generate download.html for the gh-pages site.

Downloads the latest release binary to compute its checksums, then fills
the download.html.in template with version, URLs and checksums.
"""
import hashlib
# NOTE(review): shutil/subprocess/tempfile appear unused in this script -
# confirm before removing the imports.
import shutil
import subprocess
import tempfile
import urllib.request
import json

# BUG FIX: close versions.json via a context manager instead of leaking
# the handle from json.load(open(...)).
with open('update/versions.json') as vf:
    versions_info = json.load(vf)
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]

data = urllib.request.urlopen(URL).read()

# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
    template = tmplf.read()

md5sum = hashlib.md5(data).hexdigest()
sha1sum = hashlib.sha1(data).hexdigest()
sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_MD5SUM@', md5sum)
template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
    dlf.write(template)
-32
View File
@@ -1,32 +0,0 @@
#!/usr/bin/env python3
"""Sign update/versions.json with an RSA (PKCS#1) private key.

The key is read from stdin (terminated by a blank line or EOF); the
resulting SHA-256 signature is stored back into the JSON file under the
'signature' key.
"""
import json
from binascii import hexlify

import rsa

try:
    input = raw_input  # Python 2 compatibility
except NameError:
    pass

# Load the metadata via a context manager (the original leaked the handle).
with open('update/versions.json') as f:
    versions_info = json.load(f)
# Any previous signature must not be part of the payload being signed.
if 'signature' in versions_info:
    del versions_info['signature']

print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
    try:
        line = input()
    except EOFError:
        break
    if line == '':
        break
    privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)

signature = hexlify(rsa.pkcs1.sign(
    json.dumps(versions_info, sort_keys=True).encode('utf-8'),
    privkey, 'SHA-256')).decode()
print('signature: ' + signature)

versions_info['signature'] = signature
# Write through a context manager so the file is flushed and closed
# deterministically (json.dump(..., open(...)) relied on GC to close it).
with open('update/versions.json', 'w') as f:
    json.dump(versions_info, f, indent=4, sort_keys=True)
-21
View File
@@ -1,21 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement
import datetime
import glob
import io # For Python 2 compatibilty
import os
import re
year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
with io.open(fn, encoding='utf-8') as f:
content = f.read()
newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
if content != newc:
tmpFn = fn + '.part'
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
outf.write(newc)
os.rename(tmpFn, fn)
-56
View File
@@ -1,56 +0,0 @@
#!/usr/bin/env python3
"""Regenerate update/releases.atom, the Atom feed of youtube-dl releases,
from the metadata in update/versions.json."""
import datetime
import json
import textwrap

# Feed skeleton; @TIMESTAMP@ and @ENTRIES@ are substituted below.
atom_template = textwrap.dedent("""\
<?xml version='1.0' encoding='utf-8'?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
<atom:title>youtube-dl releases</atom:title>
<atom:id>youtube-dl-updates-feed</atom:id>
<atom:updated>@TIMESTAMP@</atom:updated>
@ENTRIES@
</atom:feed>""")

entry_template = textwrap.dedent("""
<atom:entry>
<atom:id>youtube-dl-@VERSION@</atom:id>
<atom:title>New version @VERSION@</atom:title>
<atom:link href="http://rg3.github.io/youtube-dl" />
<atom:content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
</div>
</atom:content>
<atom:author>
<atom:name>The youtube-dl maintainers</atom:name>
</atom:author>
<atom:updated>@TIMESTAMP@</atom:updated>
</atom:entry>
""")

now_iso = datetime.datetime.now().isoformat()
atom_template = atom_template.replace('@TIMESTAMP@', now_iso)

# Close the metadata file promptly (the original leaked the handle).
with open('update/versions.json') as f:
    versions_info = json.load(f)

entries = []
for v in sorted(versions_info['versions']):
    # Version numbers are dates (YYYY.MM.DD), so the dash-separated form of
    # the version doubles as the per-entry timestamp.
    entry = entry_template.replace('@TIMESTAMP@', v.replace('.', '-'))
    entries.append(entry.replace('@VERSION@', v))

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

with open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
    atom_file.write(atom_template)
-34
View File
@@ -1,34 +0,0 @@
#!/usr/bin/env python3
"""Generate supportedsites.html from the extractor list.

Fills the @SITES@ placeholder of supportedsites.html.in with one <li> per
extractor, sorted case-insensitively by extractor name.
"""
import os
import sys
import textwrap

# We must be able to import youtube_dl
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import youtube_dl


def main():
    """Render the supported-sites page from its template."""
    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
        template = tmplf.read()

    ie_htmls = []
    for ie in sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()):
        ie_html = '<b>{}</b>'.format(ie.IE_NAME)
        ie_desc = getattr(ie, 'IE_DESC', None)
        if ie_desc is False:
            # IE_DESC set to False marks extractors to hide from the listing.
            continue
        elif ie_desc is not None:
            ie_html += ': {}'.format(ie.IE_DESC)
        # Idiomatic truthiness test instead of "== False".
        if not ie.working():
            ie_html += ' (Currently broken)'
        ie_htmls.append('<li>{}</li>'.format(ie_html))

    template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))

    with open('supportedsites.html', 'w', encoding='utf-8') as sitesf:
        sitesf.write(template)


if __name__ == '__main__':
    main()
-20
View File
@@ -1,20 +0,0 @@
"""Refresh the OPTIONS section of README.md from `youtube-dl --help`
output piped in on stdin."""
import re
import sys

README_FILE = 'README.md'

help_output = sys.stdin.read()
with open(README_FILE) as readme:
    old_readme = readme.read()

# Everything before "# OPTIONS" and from "# CONFIGURATION" onwards is kept.
header = old_readme[:old_readme.index('# OPTIONS')]
footer = old_readme[old_readme.index('# CONFIGURATION'):]

# Skip past the "General Options:" heading, then promote each option-group
# heading line to a markdown H2.
options = help_output[help_output.index(' General Options:')+19:]
options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M)
options = '# OPTIONS\n' + options + '\n'

with open(README_FILE, 'w') as readme:
    readme.write(header + options + footer)
-6
View File
@@ -1,6 +0,0 @@
# source this file in your shell to get a POSIX locale (which will break many programs, but that's kind of the point)
# All three variables are set because different tools consult different
# ones: LC_ALL has highest precedence, LANG is the fallback, and LANGUAGE
# is the GNU gettext message-language list.
export LC_ALL=POSIX
export LANG=POSIX
export LANGUAGE=POSIX
-101
View File
@@ -1,101 +0,0 @@
#!/bin/bash

# Release automation: runs the test suite, bumps and tags the version,
# builds/signs/uploads the binaries, regenerates the gh-pages site and
# uploads the sdist to PyPI.

# IMPORTANT: the following assumptions are made
# * the GH repo is on the origin remote
# * the gh-pages branch is named so locally
# * the git config user.signingkey is properly set

# You will need
# pip install coverage nose rsa

# TODO
# release notes
# make hash on local files

set -e

# --- Argument handling and preconditions --------------------------------
skip_tests=false
if [ "$1" = '--skip-test' ]; then
    skip_tests=true
    shift
fi

if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1"
if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi

/bin/echo -e "\n### First of all, testing..."
make cleanall
if $skip_tests ; then
    echo 'SKIPPING TESTS'
else
    nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
fi

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py

/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
make README.md
git add CHANGELOG README.md youtube_dl/version.py
git commit -m "release $version"

/bin/echo -e "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"

/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
# The Windows exe is produced by a build service on a local VM (port 8142).
read -p "VM running? (y/n) " -n 1
wget "http://localhost:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
# Restore the tracked copies that were just moved into build/.
git checkout HEAD -- youtube-dl youtube-dl.exe

/bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@yt-dl.org:html/tmp/
ssh ytdl@yt-dl.org "mv html/tmp/$version html/downloads/"
ssh ytdl@yt-dl.org "sh html/update_latest.sh $version"

/bin/echo -e "\n### Now switching to gh-pages..."
# Regenerate the website in a throwaway clone of the gh-pages branch, then
# push the result back both into this repository and to origin.
git clone --branch gh-pages --single-branch . build/gh-pages
ROOT=$(pwd)
(
    set -e
    ORIGIN_URL=$(git config --get remote.origin.url)
    cd build/gh-pages
    "$ROOT/devscripts/gh-pages/add-version.py" $version
    "$ROOT/devscripts/gh-pages/update-feed.py"
    "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
    "$ROOT/devscripts/gh-pages/generate-download.py"
    "$ROOT/devscripts/gh-pages/update-copyright.py"
    "$ROOT/devscripts/gh-pages/update-sites.py"
    git add *.html *.html.in update
    git commit -m "release $version"
    git push "$ROOT" gh-pages
    git push "$ORIGIN_URL" gh-pages
)
rm -rf build

make pypi-files
echo "Uploading to PyPi ..."
python setup.py sdist upload
make clean

/bin/echo -e "\n### DONE!"
-40
View File
@@ -1,40 +0,0 @@
#!/usr/bin/env python
# Legacy one-shot updater: replaces this (git-checkout era) youtube-dl with
# the binary hosted on GitHub downloads.  Runs under both Python 2 and 3,
# hence the compatibility shims below.
import sys, os

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

# Tell the user what is about to happen and wait for confirmation.
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

try:
    raw_input()
except NameError: # Python 3
    input()

filename = sys.argv[0]

API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

# Download the replacement binary ...
try:
    urlh = compat_urllib_request.urlopen(BIN_URL)
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

# ... and overwrite the running script in place.
try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
-12
View File
@@ -1,12 +0,0 @@
# Minimal py2exe build script for the legacy single-file youtube-dl.exe.
from distutils.core import setup
import py2exe

# bundle_files=1 packs everything into one exe; optimize=2 uses -OO
# bytecode; w9xpopen.exe is a Windows-9x helper never needed on NT systems.
py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
}

setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
@@ -1,102 +0,0 @@
#!/usr/bin/env python
import sys, os
import urllib2
import json, hashlib
def rsa_verify(message, signature, key):
    """Verify *signature* (hex string) over *message* (bytes) against the
    RSA public key ``key = (modulus, exponent)``.

    Implements textbook PKCS#1 v1.5 verification with SHA-256: undo the RSA
    operation, check the 00 01 FF..FF 00 padding, the SHA-256 DigestInfo
    prefix, and finally compare the embedded digest.  Returns True when the
    signature is valid, False otherwise.
    """
    from hashlib import sha256
    from struct import pack
    from sys import version_info

    def b(x):
        # On Python 2, str already is bytes; on Python 3, encode explicitly.
        return x if version_info[0] == 2 else x.encode('latin1')

    assert(type(message) == type(b('')))
    modulus, exponent = key[0], key[1]
    # The modulus byte length fixes the size of the signature block.
    block_size = 0
    remaining = modulus
    while remaining:
        block_size += 1
        remaining >>= 8
    # Undo the RSA operation (sig^e mod n) and serialise back to bytes,
    # left-padded with zeros up to the block size.
    decoded = pow(int(signature, 16), exponent, modulus)
    chunks = []
    while decoded:
        chunks.insert(0, pack("B", decoded & 0xFF))
        decoded >>= 8
    padded = (block_size - len(chunks)) * b('\x00') + b('').join(chunks)
    # PKCS#1 v1.5 layout: 00 01 <FF padding> 00 <DigestInfo> <hash>
    if padded[0:2] != b('\x00\x01'):
        return False
    padded = padded[2:]
    if b('\x00') not in padded:
        return False
    padded = padded[padded.index(b('\x00')) + 1:]
    # ASN.1 DigestInfo header identifying SHA-256.
    if not padded.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
        return False
    return padded[19:] == sha256(message).digest()
# --- Legacy self-updater (Python 2 only: uses urllib2/raw_input) ---------
# Migrates an old install to the gh-pages distribution channel: fetches the
# signed versions.json, verifies its RSA signature, downloads the exe whose
# SHA-256 matches the metadata, and swaps it in via a helper .bat file
# (a running Windows executable cannot overwrite itself).
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
raw_input()

filename = sys.argv[0]

UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
# Public RSA key (modulus, exponent) used to verify versions.json.
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

# Both the exe itself and its directory must be writable for the swap.
if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % directory)

# NOTE(review): bare except kept for the legacy behavior of treating any
# network/JSON failure as "try again later".
try:
    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
# The signature covers the JSON payload minus the signature itself.
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')

version = versions_info['versions'][versions_info['latest']]

try:
    urlh = urllib2.urlopen(version['exe'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

# Refuse to install a binary whose hash differs from the signed metadata.
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')

try:
    with open(exe + '.new', 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit(u'ERROR: unable to write the new version')

# Spawn a batch file that waits (via ping) for this process to exit, then
# moves the .new binary over the old one and deletes itself.
try:
    bat = os.path.join(directory, 'youtube-dl-updater.bat')
    b = open(bat, 'w')
    b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
    b.close()

    os.startfile(bat)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
-56
View File
@@ -1,56 +0,0 @@
#!/bin/bash

# Build a Windows exe with py2exe under Wine.
# Run with as parameter a setup.py that works in the current directory
# e.g. no os.chdir()
# It will run twice, the first time will crash

set -e

SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"

# First run only: bootstrap a Wine prefix with Python 2.7 and py2exe.
if [ ! -d wine-py2exe ]; then

    sudo apt-get install wine1.3 axel bsdiff

    mkdir wine-py2exe
    cd wine-py2exe
    export WINEPREFIX=`pwd`

    axel -a "http://www.python.org/ftp/python/2.7/python-2.7.msi"
    axel -a "http://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.7.exe"
    #axel -a "http://winetricks.org/winetricks"

    # http://appdb.winehq.org/objectManager.php?sClass=version&iId=21957
    echo "Follow python setup on screen"
    wine msiexec /i python-2.7.msi

    echo "Follow py2exe setup on screen"
    wine py2exe-0.6.9.win32-py2.7.exe

    #echo "Follow Microsoft Visual C++ 2008 Redistributable Package setup on screen"
    #bash winetricks vcrun2008

    rm py2exe-0.6.9.win32-py2.7.exe
    rm python-2.7.msi
    #rm winetricks

    # Binary-patch py2exe's stub executables (SizeOfImage header fix):
    # http://bugs.winehq.org/show_bug.cgi?id=3591
    mv drive_c/Python27/Lib/site-packages/py2exe/run.exe drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run.exe "$SCRIPT_DIR/SizeOfImage.patch"
    mv drive_c/Python27/Lib/site-packages/py2exe/run_w.exe drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run_w.exe "$SCRIPT_DIR/SizeOfImage_w.patch"

    cd -

else

    export WINEPREFIX="$( cd wine-py2exe && pwd )"

fi

# The first build fails before python27.dll is in place (hence "|| true");
# copy the DLL next to the bundle and run the build a second time.
wine "C:\\Python27\\python.exe" "$1" py2exe > "py2exe.log" 2>&1 || true
echo '# Copying python27.dll' >> "py2exe.log"
cp "$WINEPREFIX/drive_c/windows/system32/python27.dll" build/bdist.win32/winexe/bundle-2.7/
wine "C:\\Python27\\python.exe" "$1" py2exe >> "py2exe.log" 2>&1
+7
View File
@@ -0,0 +1,7 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<meta http-equiv="refresh" content="0; url=https://github.com/ytdl-org/youtube-dl/blob/master/README.md#readme">
</head>
</html>
+77
View File
@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>youtube-dl: Download Page</title>
<link rel="stylesheet" href="style.css" type="text/css">
</head>
<body>
<table class="heading"><tr>
<td class="title"><a href="index.html">youtube-dl</a></td>
<td class="subtitle">Download Page</td>
</tr></table>
<p>Remember <em>youtube-dl</em> requires <a href="http://www.python.org/">Python</a> version 2.6, 2.7, or 3.2+ to work except for Windows exe.</p>
<p><a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl.exe">Windows exe</a> requires <a href="https://download.microsoft.com/download/1/6/5/165255E7-1014-4D0A-B094-B6A430A6BFFC/vcredist_x86.exe">Microsoft Visual C++ 2010 Service Pack 1 Redistributable Package (x86)</a> and does not require Python that is already embedded into the binary.</p>
<h2><a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl">2021.12.17</a> (<a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl.sig">sig</a>)</h2>
<p><strong>SHA256</strong>: 7880e01abe282c7fd596f429c35189851180d6177302bb215be1cdec78d6d06d</p>
<p>
<a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl.exe">Windows exe</a> (<a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl.exe.sig">sig</a> - SHA256 26e5c00c35c5c3edc86dfc0a720aed109a13b1b7c67ac654a0ce8ff82a1f2c16)<br>
<a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl-2021.12.17.tar.gz">Full source + docs + binary tarball</a> (<a href="https://yt-dl.org/downloads/2021.12.17/youtube-dl-2021.12.17.tar.gz.sig">sig</a> - SHA256 9f3b99c8b778455165b4525f21505e86c7ff565f3ac319e19733d810194135df)
</p>
<p>To install it right away for all UNIX users (Linux, OS X, etc.), type:
<code class="commands">sudo curl -L https://yt-dl.org/downloads/latest/youtube-dl -o /usr/local/bin/youtube-dl<br/>
sudo chmod a+rx /usr/local/bin/youtube-dl</code>
</p>
<p>If you do not have curl, you can alternatively use a recent wget:
<code class="commands">sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl<br/>
sudo chmod a+rx /usr/local/bin/youtube-dl</code>
</p>
<p>You can also use pip:
<code class="commands">sudo pip install --upgrade youtube_dl</code>
</p>
<p>
This command will update youtube-dl if you have already installed it.
See the <a href="https://pypi.python.org/pypi/youtube_dl">pypi page</a> for more information.
</p>
<p>You can use Homebrew if you have it:
<code class="commands">brew install youtube-dl</code>
</p>
<p>To check the signature, type:
<code class="commands">sudo wget https://yt-dl.org/downloads/latest/youtube-dl.sig -O youtube-dl.sig<br/>gpg --verify youtube-dl.sig /usr/local/bin/youtube-dl<br/>rm youtube-dl.sig</code>
</p>
<p>The following GPG keys will be used to sign the binaries and the git tags:</p>
<ul>
<li>Sergey M. <code class="fingerprint"><a href="https://dstftw.github.io/keys/18A9236D.asc">ED7F 5BF4 6B3B BED8 1C87 368E 2C39 3E0F 18A9 236D</a></code></li>
</ul>
<p>Older releases are also signed with one of:
<ul>
<li>Philipp Hagemeister <code class="fingerprint"><a href="https://phihag.de/keys/A4826A18.asc">7D33 D762 FD6C 3513 0481 347F DB4B 54CB A482 6A18</a></code> (until 2016-05-30)</li>
<li>Philipp Hagemeister <code class="fingerprint">0600 E1DB 6FB5 3A5D 95D8 FC0D F5EA B582 FAFB 085C</code> (until 2013-06-01)</li>
<li>Filippo Valsorda <code class="fingerprint">428D F5D6 3EF0 7494 BB45 5AC0 EBF0 1804 BCF0 5F6B</code> (until 2014)</li>
</ul>
</p>
<div class="note">
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">
<img alt="Creative Commons License" style="border-width:0"
src="https://i.creativecommons.org/l/by-sa/3.0/80x15.png" /></a><br />
Copyright © 2006-2011 Ricardo Garcia Gonzalez<br />
Copyright © 2011-2021 youtube-dl developers
</div>
</body>
</html>
+77
View File
@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>youtube-dl: Download Page</title>
<link rel="stylesheet" href="style.css" type="text/css">
</head>
<body>
<table class="heading"><tr>
<td class="title"><a href="index.html">youtube-dl</a></td>
<td class="subtitle">Download Page</td>
</tr></table>
<p>Remember <em>youtube-dl</em> requires <a href="http://www.python.org/">Python</a> version 2.6, 2.7, or 3.2+ to work except for Windows exe.</p>
<p><a href="@EXE_URL@">Windows exe</a> requires <a href="https://download.microsoft.com/download/1/6/5/165255E7-1014-4D0A-B094-B6A430A6BFFC/vcredist_x86.exe">Microsoft Visual C++ 2010 Service Pack 1 Redistributable Package (x86)</a> and does not require Python that is already embedded into the binary.</p>
<h2><a href="@PROGRAM_URL@">@PROGRAM_VERSION@</a> (<a href="@PROGRAM_URL@.sig">sig</a>)</h2>
<p><strong>SHA256</strong>: @PROGRAM_SHA256SUM@</p>
<p>
<a href="@EXE_URL@">Windows exe</a> (<a href="@EXE_URL@.sig">sig</a> - SHA256 @EXE_SHA256SUM@)<br>
<a href="@TAR_URL@">Full source + docs + binary tarball</a> (<a href="@TAR_URL@.sig">sig</a> - SHA256 @TAR_SHA256SUM@)
</p>
<p>To install it right away for all UNIX users (Linux, OS X, etc.), type:
<code class="commands">sudo curl -L https://yt-dl.org/downloads/latest/youtube-dl -o /usr/local/bin/youtube-dl<br/>
sudo chmod a+rx /usr/local/bin/youtube-dl</code>
</p>
<p>If you do not have curl, you can alternatively use a recent wget:
<code class="commands">sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl<br/>
sudo chmod a+rx /usr/local/bin/youtube-dl</code>
</p>
<p>You can also use pip:
<code class="commands">sudo pip install --upgrade youtube_dl</code>
</p>
<p>
This command will update youtube-dl if you have already installed it.
See the <a href="https://pypi.python.org/pypi/youtube_dl">pypi page</a> for more information.
</p>
<p>You can use Homebrew if you have it:
<code class="commands">brew install youtube-dl</code>
</p>
<p>To check the signature, type:
<code class="commands">sudo wget https://yt-dl.org/downloads/latest/youtube-dl.sig -O youtube-dl.sig<br/>gpg --verify youtube-dl.sig /usr/local/bin/youtube-dl<br/>rm youtube-dl.sig</code>
</p>
<p>The following GPG keys will be used to sign the binaries and the git tags:</p>
<ul>
<li>Sergey M. <code class="fingerprint"><a href="https://dstftw.github.io/keys/18A9236D.asc">ED7F 5BF4 6B3B BED8 1C87 368E 2C39 3E0F 18A9 236D</a></code></li>
</ul>
<p>Older releases are also signed with one of:
<ul>
<li>Philipp Hagemeister <code class="fingerprint"><a href="https://phihag.de/keys/A4826A18.asc">7D33 D762 FD6C 3513 0481 347F DB4B 54CB A482 6A18</a></code> (until 2016-05-30)</li>
<li>Philipp Hagemeister <code class="fingerprint">0600 E1DB 6FB5 3A5D 95D8 FC0D F5EA B582 FAFB 085C</code> (until 2013-06-01)</li>
<li>Filippo Valsorda <code class="fingerprint">428D F5D6 3EF0 7494 BB45 5AC0 EBF0 1804 BCF0 5F6B</code> (until 2014)</li>
</ul>
</p>
<div class="note">
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">
<img alt="Creative Commons License" style="border-width:0"
src="https://i.creativecommons.org/l/by-sa/3.0/80x15.png" /></a><br />
Copyright © 2006-2011 Ricardo Garcia Gonzalez<br />
Copyright © 2011-2021 youtube-dl developers
</div>
</body>
</html>
+7
View File
@@ -0,0 +1,7 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<meta http-equiv="refresh" content="0; url=https://github.com/ytdl-org/youtube-dl/blob/master/README.md#faq">
</head>
</html>
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 435 B

BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 356 B

BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 425 B

BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 349 B

BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 331 B

+37
View File
@@ -0,0 +1,37 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />
<title>youtube-dl</title>
<link rel="stylesheet" href="style.css" type="text/css" />
<link rel="alternate" type="application/atom+xml" title="youtube-dl releases" href="update/releases.atom" />
</head>
<body>
<table class="heading"><tr>
<td class="title"><a href="index.html">youtube-dl</a></td>
<td class="subtitle">Download videos from YouTube
(and <a href="supportedsites.html" style="color: blue; text-decoration: underline;">more sites</a>)</td>
</tr></table>
<p><em>youtube-dl</em> is a command-line program to download videos from YouTube.com and a few <a href="supportedsites.html" style="color: blue; text-decoration: underline;">more sites</a>. It requires the <a href="http://www.python.org/">Python interpreter</a> (2.6, 2.7, or 3.2+), and it is not platform specific. We also provide a <a href="https://yt-dl.org/latest/youtube-dl.exe">Windows executable</a> that includes Python. youtube-dl should work in your Unix box, in Windows or in Mac OS X. It is released to the public domain, which means you can modify it, redistribute it or use it however you like.</p>
<table border="0" id="rgb">
<tr><td><a class="button" id="r" href="https://github.com/ytdl-org/youtube-dl/blob/master/README.md#readme">Documentation</a></td></tr>
<tr><td><a class="button" id="g" href="download.html">Download</a></td></tr>
<tr><td><a class="button" id="main-support" href="https://github.com/ytdl-org/youtube-dl/issues/new/choose">Support</a></td></tr>
<tr><td><a class="button" id="y" href="https://github.com/ytdl-org/youtube-dl/">Develop</a></td></tr>
<tr><td><a class="button" id="b" href="about.html">About</a></td></tr>
</table>
<p>You can also contact us on the irc channel <a href="irc://chat.freenode.net/#youtube-dl">#youtube-dl</a> (<a href="http://webchat.freenode.net/?randomnick=1&amp;channels=youtube-dl">webchat</a>) on freenode.</p>
<div class="note">
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">
<img alt="Creative Commons License" style="border-width:0"
src="https://i.creativecommons.org/l/by-sa/3.0/80x15.png" /></a><br />
Copyright © 2006-2011 Ricardo Garcia Gonzalez<br />
Copyright © 2011-2021 youtube-dl developers
</div>
</body>
</html>
+1
View File
@@ -0,0 +1 @@
../update/LATEST_VERSION
-92
View File
@@ -1,92 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import pkg_resources
import sys
try:
from setuptools import setup
setuptools_available = True
except ImportError:
from distutils.core import setup
setuptools_available = False
try:
# This will create an exe that needs Microsoft Visual C++ 2008
# Redistributable Package
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe'],
}
py2exe_console = [{
"script": "./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
py2exe_params = {
'console': py2exe_console,
'options': {"py2exe": py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
params = {
'data_files': [ # Installing system-wide would require sudo...
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
}
if setuptools_available:
params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
else:
params['scripts'] = ['bin/youtube-dl']
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
'youtube_dl/version.py', 'exec'))
setup(
name='youtube_dl',
version=__version__,
description='YouTube video downloader',
long_description='Small command-line program to download videos from'
' YouTube.com and other video sites.',
url='https://github.com/rg3/youtube-dl',
author='Ricardo Garcia',
author_email='ytdl@yt-dl.org',
maintainer='Philipp Hagemeister',
maintainer_email='phihag@phihag.de',
packages=['youtube_dl', 'youtube_dl.extractor'],
# Provokes warning on most systems (why?!)
# test_suite = 'nose.collector',
# test_requires = ['nosetest'],
classifiers=[
"Topic :: Multimedia :: Video",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: Public Domain",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3"
],
**params
)
+156
View File
@@ -0,0 +1,156 @@
body {
font-family: sans-serif;
margin-left: 10%;
margin-right: 10%;
margin-top: 2ex;
margin-bottom: 3ex;
background-color: white;
color: black;
/*background-color: #fff1db;*/
background-color: white;
/*
background-image: url("gradient.png");
background-repeat: repeat-x;
*/
/*
background-image: url("gradient2.png");
background-repeat: repeat-y;
*/
/*
background-image: url("gradient3.png");
background-repeat: repeat-x;
*/
/*
background-image: url("gradient4.png");
background-repeat: repeat-y;
*/
background-image: url("gradient5.png");
background-repeat: repeat-x;
}
.heading {
border: 0;
color: black;
font-size: xx-large;
font-weight: bold;
padding-bottom: 1ex;
border-bottom: 1px solid black;
margin-bottom: 2ex;
width: 100%;
}
.heading tr {
border: 0;
}
.heading td {
border: 0;
}
.heading a {
text-decoration: none;
color: black;
}
.title {
text-align: left;
}
.subtitle {
text-align: right;
}
.toc {
padding-left: 2ex;
border: 1px solid #aaaaaa;
background-color: white;
padding-bottom: 1ex;
border-radius: 10px;
-moz-border-radius: 10px;
}
.toc ul {
margin: 0; list-style-type: none;
}
hr {
margin-top: 3ex;
margin-bottom: 3ex;
width: 50%;
}
.note {
margin-top: 10ex;
text-align: center;
font-size: x-small;
}
h1 {
font-size: x-large;
margin-top: 2ex;
color: black;
margin-left: 2%;
margin-right: 2%;
}
h2 {
font-size: large;
margin-left: 5%;
margin-right: 5%;
}
p {
margin-left: 5%;
margin-right: 5%;
}
ul {
margin-left: 5%;
margin-right: 5%;
}
li {
margin-left: 3%;
margin-top: 0.5ex;
margin-bottom: 0.5ex;
}
tt {
padding-left: 0.5ex;
padding-right: 0.5ex;
background: #dddddd;
}
#rgb {
width: 33%;
margin: 3ex auto;
}
.button {
color: white;
font-weight: bold;
font-size: x-large;
text-decoration: none;
text-align: center;
display: block;
padding: 2ex;
border-radius: 10px;
-moz-border-radius: 10px;
}
#r {
background-color: #884444;
border: 2px solid #880000;
}
#g {
background-color: #448844;
border: 2px solid #006600;
}
#b {
background-color: #444488;
border: 2px solid #000088;
}
#y {
background-color: #888844;
border: 2px solid #666600;
}
#main-support {
background-color: #448888;
border: 2px solid #008888;
}
code.commands {
display:block;
margin-top: 0.4em;
padding: 0.7em;
background: #ccc;
background: rgba(200, 200, 200, 0.4);
white-space: pre;
}
code.fingerprint {
font-weight: bold;
font-size: 140%;
}
+1252
View File
File diff suppressed because it is too large Load Diff
+28
View File
@@ -0,0 +1,28 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>youtube-dl: Supported sites</title>
<link rel="stylesheet" href="style.css" type="text/css">
</head>
<body>
<table class="heading"><tr>
<td class="title"><a href="index.html">youtube-dl</a></td>
<td class="subtitle">Supported sites</td>
</tr></table>
<p>Here is the list of all the supported sites, ordered alphabetically:</p>
<ul>
@SITES@
</ul>
<div class="note">
<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">
<img alt="Creative Commons License" style="border-width:0"
src="https://i.creativecommons.org/l/by-sa/3.0/80x15.png" /></a><br />
Copyright © 2006-2011 Ricardo Garcia Gonzalez<br />
Copyright © 2011-2021 youtube-dl developers
</div>
</body>
</html>
View File
-85
View File
@@ -1,85 +0,0 @@
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.utils import preferredencoding
def get_params(override=None):
    """Load the shared test parameters from parameters.json.

    The file lives next to this module. Entries in *override* (if given)
    are overlaid on top of the loaded defaults.
    """
    params_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "parameters.json")
    with io.open(params_path, encoding='utf-8') as params_file:
        params = json.load(params_file)
    if override:
        params.update(override)
    return params
def try_rm(filename):
    """Delete *filename*, silently tolerating a file that does not exist.

    Any other OSError (permissions, is-a-directory, ...) is re-raised.
    """
    try:
        os.remove(filename)
    except OSError as err:
        # A missing file is the expected, harmless case.
        if err.errno == errno.ENOENT:
            return
        raise
def report_warning(message):
    """Write *message* to stderr prefixed with 'WARNING:'.

    The prefix is colored yellow when stderr is a terminal, except on
    Windows whose console does not interpret ANSI escapes.
    """
    use_color = sys.stderr.isatty() and os.name != 'nt'
    header = u'\033[0;33mWARNING:\033[0m' if use_color else u'WARNING:'
    output = u'%s %s\n' % (header, message)
    # Byte-mode streams (and Python 2) need explicit encoding before writing.
    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
        output = output.encode(preferredencoding())
    sys.stderr.write(output)
class FakeYDL(YoutubeDL):
    """YoutubeDL stand-in for tests: records download requests in memory
    instead of performing them, and turns any reported trouble into a
    hard test failure."""

    def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary
        # some test set the "sublang" parameter, which would break the md5 checks.
        params = get_params(override=override)
        super(FakeYDL, self).__init__(params)
        self.result = []  # URLs passed to download()

    def to_screen(self, s, skip_eol=None):
        # Route screen output to stdout so test runners capture it.
        print(s)

    def trouble(self, s, tb=None):
        # Fail loudly: tests must not silently swallow extractor errors.
        raise Exception(s)

    def download(self, x):
        # Record what would have been downloaded instead of fetching it.
        self.result.append(x)

    def expect_warning(self, regex):
        # Silence an expected warning matching a regex
        old_report_warning = self.report_warning

        def report_warning(self, message):
            if re.match(regex, message): return
            old_report_warning(message)
        # Bind the filtering wrapper as this instance's report_warning.
        self.report_warning = types.MethodType(report_warning, self)
def get_testcases():
    """Yield every test case declared by the extractors.

    Each yielded dict is tagged with a 'name' key: the extractor's class
    name minus its 'IE' suffix.
    """
    for ie in youtube_dl.extractor.gen_extractors():
        ie_name = type(ie).__name__[:-len('IE')]
        single_test = getattr(ie, '_TEST', None)
        if single_test:
            single_test['name'] = ie_name
            yield single_test
        for case in getattr(ie, '_TESTS', []):
            case['name'] = ie_name
            yield case
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
-43
View File
@@ -1,43 +0,0 @@
{
"consoletitle": false,
"continuedl": true,
"forcedescription": false,
"forcefilename": false,
"forceformat": false,
"forcethumbnail": false,
"forcetitle": false,
"forceurl": false,
"format": null,
"format_limit": null,
"ignoreerrors": false,
"listformats": null,
"logtostderr": false,
"matchtitle": null,
"max_downloads": null,
"nooverwrites": false,
"nopart": false,
"noprogress": false,
"outtmpl": "%(id)s.%(ext)s",
"password": null,
"playlistend": -1,
"playliststart": 1,
"prefer_free_formats": false,
"quiet": false,
"ratelimit": null,
"rejecttitle": null,
"retries": 10,
"simulate": false,
"skip_download": false,
"subtitleslang": null,
"subtitlesformat": "srt",
"test": true,
"updatetime": true,
"usenetrc": false,
"username": null,
"verbose": true,
"writedescription": false,
"writeinfojson": true,
"writesubtitles": false,
"allsubtitles": false,
"listssubtitles": false
}
-145
View File
@@ -1,145 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
class YDL(FakeYDL):
    """FakeYDL variant that records processed info dicts and screen
    messages in memory so assertions can inspect them afterwards."""

    def __init__(self, *args, **kwargs):
        super(YDL, self).__init__(*args, **kwargs)
        self.downloaded_info_dicts = []  # every info_dict passed to process_info()
        self.msgs = []  # every message printed via to_screen()

    def process_info(self, info_dict):
        # Capture instead of downloading.
        self.downloaded_info_dicts.append(info_dict)

    def to_screen(self, msg):
        self.msgs.append(msg)
class TestFormatSelection(unittest.TestCase):
    """Exercise YoutubeDL's format selection logic ('format',
    'format_limit' and 'prefer_free_formats' parameters) using the
    in-memory YDL stub."""

    def test_prefer_free_formats(self):
        # Same resolution => download webm
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {u'ext': u'webm', u'height': 460},
            {u'ext': u'mp4', u'height': 460},
        ]
        info_dict = {u'formats': formats, u'extractor': u'test'}
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'ext'], u'webm')
        # Different resolution => download best quality (mp4)
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
            {u'ext': u'webm', u'height': 720},
            {u'ext': u'mp4', u'height': 1080},
        ]
        info_dict[u'formats'] = formats
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'ext'], u'mp4')
        # No prefer_free_formats => keep original formats order
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
            {u'ext': u'webm', u'height': 720},
            {u'ext': u'flv', u'height': 720},
        ]
        info_dict[u'formats'] = formats
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'ext'], u'flv')

    def test_format_limit(self):
        # Formats are ordered worst to best; the limit caps how good a
        # format may be picked.
        formats = [
            {u'format_id': u'meh', u'url': u'http://example.com/meh'},
            {u'format_id': u'good', u'url': u'http://example.com/good'},
            {u'format_id': u'great', u'url': u'http://example.com/great'},
            {u'format_id': u'excellent', u'url': u'http://example.com/exc'},
        ]
        info_dict = {
            u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
        # No limit => best format wins.
        ydl = YDL()
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'format_id'], u'excellent')
        ydl = YDL({'format_limit': 'good'})
        assert ydl.params['format_limit'] == 'good'
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'format_id'], u'good')
        # 'all' downloads every format up to (and including) the limit.
        ydl = YDL({'format_limit': 'great', 'format': 'all'})
        ydl.process_ie_result(info_dict)
        self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
        self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
        self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
        self.assertTrue('3' in ydl.msgs[0])
        ydl = YDL()
        ydl.params['format_limit'] = 'excellent'
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded[u'format_id'], u'excellent')

    def test_format_selection(self):
        formats = [
            {u'format_id': u'35', u'ext': u'mp4'},
            {u'format_id': u'45', u'ext': u'webm'},
            {u'format_id': u'47', u'ext': u'webm'},
            {u'format_id': u'2', u'ext': u'flv'},
        ]
        info_dict = {u'formats': formats, u'extractor': u'test'}
        # First available id in the '/'-separated preference list wins.
        ydl = YDL({'format': u'20/47'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], u'47')
        ydl = YDL({'format': u'20/71/worst'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], u'35')
        ydl = YDL()
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], u'2')
        # Extensions work as format selectors too.
        ydl = YDL({'format': u'webm/mp4'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], u'47')
        ydl = YDL({'format': u'3gp/40/mp4'})
        ydl.process_ie_result(info_dict)
        downloaded = ydl.downloaded_info_dicts[0]
        self.assertEqual(downloaded['format_id'], u'35')

    def test_add_extra_info(self):
        test_dict = {
            'extractor': 'Foo',
        }
        extra_info = {
            'extractor': 'Bar',
            'playlist': 'funny videos',
        }
        YDL.add_extra_info(test_dict, extra_info)
        # Existing keys must not be overwritten by extra info.
        self.assertEqual(test_dict['extractor'], 'Foo')
        self.assertEqual(test_dict['playlist'], 'funny videos')
if __name__ == '__main__':
unittest.main()
-54
View File
@@ -1,54 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import try_rm
from youtube_dl import YoutubeDL
def _download_restricted(url, filename, age):
    """ Returns true iff the file has been downloaded """
    # Configure a downloader pretending the viewer is *age* years old; only
    # the .info.json is written (the media download itself is skipped).
    params = {
        'age_limit': age,
        'skip_download': True,
        'writeinfojson': True,
        "outtmpl": "%(id)s.%(ext)s",
    }
    ydl = YoutubeDL(params)
    ydl.add_default_info_extractors()
    json_filename = os.path.splitext(filename)[0] + '.info.json'
    try_rm(json_filename)
    ydl.download([url])
    # Presence of the .info.json indicates the extraction was allowed.
    res = os.path.exists(json_filename)
    try_rm(json_filename)
    return res
class TestAgeRestriction(unittest.TestCase):
    """Verify that age-restricted videos are only extracted when the
    configured viewer age is high enough."""

    def _assert_restricted(self, url, filename, age, old_age=None):
        # Old enough (old_age/None) => extraction succeeds; *age* => blocked.
        self.assertTrue(_download_restricted(url, filename, old_age))
        self.assertFalse(_download_restricted(url, filename, age))

    def test_youtube(self):
        self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)

    def test_youporn(self):
        self._assert_restricted(
            'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
            '505835.mp4', 2, old_age=25)

    def test_pornotube(self):
        self._assert_restricted(
            'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
            '1689755.flv', 13)
if __name__ == '__main__':
unittest.main()
-111
View File
@@ -1,111 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_testcases
from youtube_dl.extractor import (
gen_extractors,
JustinTVIE,
YoutubeIE,
)
class TestAllURLsMatching(unittest.TestCase):
    """Check that URLs are claimed by exactly the extractors that should
    handle them (and by no others)."""

    def setUp(self):
        self.ies = gen_extractors()

    def matching_ies(self, url):
        # IE_NAMEs of all non-generic extractors that consider the URL suitable.
        return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic']

    def assertMatch(self, url, ie_list):
        self.assertEqual(self.matching_ies(url), ie_list)

    def test_youtube_playlist_matching(self):
        assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
        assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q')  # issue 585
        assertPlaylist(u'PL63F0C78739B09958')
        assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
        assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # issue 668
        # Plain video ids must not be mistaken for playlist ids.
        self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M'))

    def test_youtube_matching(self):
        self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
        self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # issue 668
        self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
        self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
        self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])

    def test_youtube_channel_matching(self):
        assertChannel = lambda url: self.assertMatch(url, ['youtube:channel'])
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM')
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')

    def test_youtube_user_matching(self):
        self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user'])

    def test_youtube_feeds(self):
        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watch_later'])
        self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions'])
        self.assertMatch('https://www.youtube.com/feed/recommended', ['youtube:recommended'])
        self.assertMatch('https://www.youtube.com/my_favorites', ['youtube:favorites'])

    def test_youtube_show_matching(self):
        self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])

    def test_justin_tv_channelid_matching(self):
        # JustinTV/Twitch URLs are accepted with and without scheme/www.
        self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"www.justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"www.twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv/"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/"))

    def test_justintv_videoid_matching(self):
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/b/328087483"))

    def test_justin_tv_chapterid_matching(self):
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))

    def test_youtube_extract(self):
        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id)
        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')

    def test_no_duplicates(self):
        # Every test-case URL must be matched by its own extractor and by no
        # other (the generic extractor is exempt).
        ies = gen_extractors()
        for tc in get_testcases():
            url = tc['url']
            for ie in ies:
                if type(ie).__name__ in ['GenericIE', tc['name'] + 'IE']:
                    self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
                else:
                    self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))

    def test_keywords(self):
        # Keyword shortcuts map to their extractors.
        self.assertMatch(':ytsubs', ['youtube:subscriptions'])
        self.assertMatch(':ytsubscriptions', ['youtube:subscriptions'])
        self.assertMatch(':ythistory', ['youtube:history'])
        self.assertMatch(':thedailyshow', ['ComedyCentralShows'])
        self.assertMatch(':tds', ['ComedyCentralShows'])
        self.assertMatch(':colbertreport', ['ComedyCentralShows'])
        self.assertMatch(':cr', ['ComedyCentralShows'])
if __name__ == '__main__':
unittest.main()
-178
View File
@@ -1,178 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
get_params,
get_testcases,
try_rm,
md5,
report_warning
)
import hashlib
import io
import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.utils import (
compat_str,
compat_urllib_error,
compat_HTTPError,
DownloadError,
ExtractorError,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
class YoutubeDL(youtube_dl.YoutubeDL):
    """Downloader wrapper for the download tests: records every processed
    info dict, routes stderr output through to_screen, and turns warnings
    into hard errors."""

    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []  # every info_dict seen by process_info()
        super(YoutubeDL, self).__init__(*args, **kwargs)

    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        # Record, then let the real implementation handle the download.
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
defs = get_testcases()
class TestDownload(unittest.TestCase):
    """Shell class: the actual per-extractor test methods are attached to
    it dynamically from the extractor test cases."""
    maxDiff = None  # always show full diffs for failed comparisons

    def setUp(self):
        self.defs = defs
### Dynamically generate tests
def generator(test_case):
    """Build a test method for one extractor test case.

    The returned function downloads the test case's URL (retrying on
    network errors up to RETRIES times), then verifies the produced
    files, their MD5 checksums and the fields of the written .info.json
    against the expectations in ``test_case``. Fixes the typo in the
    skip message ("cannot be know" -> "cannot be known").
    """
    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
        other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
        # Skip when the extractor (or one it depends on) is broken, when the
        # case is explicitly skipped, or when the output filename is unknowable.
        if not ie.working():
            print_skipping('IE marked as not _WORKING')
            return
        if 'playlist' not in test_case:
            info_dict = test_case.get('info_dict', {})
            if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
                print_skipping('The output file cannot be known, the "file" '
                               'key is missing or the info_dict is incomplete')
                return
        if 'skip' in test_case:
            print_skipping(test_case['skip'])
            return
        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
                return
        params = get_params(test_case.get('params', {}))
        ydl = YoutubeDL(params)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.fd.add_progress_hook(_hook)

        def get_tc_filename(tc):
            return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
        # A playlist test case carries sub-cases; a plain one is its own case.
        test_cases = test_case.get('playlist', [test_case])

        def try_rm_tcs_files():
            for tc in test_cases:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
            while True:
                try:
                    ydl.download([test_case['url']])
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    # NOTE(review): compat_HTTPError is not in the tuple below,
                    # so the 503 clause looks unreachable — confirm intent.
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        raise
                    if try_num == RETRIES:
                        report_warning(u'Failed due to network errors, skipping...')
                        return
                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
                    try_num += 1
                else:
                    break
            for tc in test_cases:
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(os.path.exists(info_json_fn))
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc_filename)
                    self.assertEqual(md5_for_file, tc['md5'])
                with io.open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                for (info_field, expected) in tc.get('info_dict', {}).items():
                    # Long expected strings may be given as 'md5:<digest>'.
                    if isinstance(expected, compat_str) and expected.startswith('md5:'):
                        got = 'md5:' + md5(info_dict.get(info_field))
                    else:
                        got = info_dict.get(info_field)
                    self.assertEqual(expected, got,
                                     u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
                # If checkable fields are missing from the test case, print the info_dict
                test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                                      for key, value in info_dict.items()
                                      if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
                if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
                    sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n')
                # Check for the presence of mandatory fields
                for key in ('id', 'url', 'title', 'ext'):
                    self.assertTrue(key in info_dict.keys() and info_dict[key])
                # Check for mandatory fields that are automatically set by YoutubeDL
                for key in ['webpage_url', 'extractor', 'extractor_key']:
                    self.assertTrue(info_dict.get(key), u'Missing field: %s' % key)
        finally:
            try_rm_tcs_files()
    return test_template
### And add them to TestDownload
# Attach one generated test method per test case to TestDownload,
# disambiguating duplicate extractor names with a numeric suffix.
for n, test_case in enumerate(defs):
    test_method = generator(test_case)
    tname = 'test_' + str(test_case['name'])
    i = 1
    while hasattr(TestDownload, tname):
        tname = 'test_' + str(test_case['name']) + '_' + str(i)
        i += 1
    test_method.__name__ = tname
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method
if __name__ == '__main__':
unittest.main()
-26
View File
@@ -1,26 +0,0 @@
import unittest
import sys
import os
import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
_DEV_NULL = subprocess.DEVNULL
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb')
class TestExecution(unittest.TestCase):
    """Ensure youtube_dl can be imported and launched in every supported
    way, each in a fresh subprocess rooted at the repository directory."""

    def test_import(self):
        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)

    def test_module_exec(self):
        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
            subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)

    def test_main_exec(self):
        subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
if __name__ == '__main__':
unittest.main()
-115
View File
@@ -1,115 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import (
DailymotionPlaylistIE,
DailymotionUserIE,
VimeoChannelIE,
UstreamChannelIE,
SoundcloudSetIE,
SoundcloudUserIE,
LivestreamIE,
NHLVideocenterIE,
BambuserChannelIE,
BandcampAlbumIE
)
class TestPlaylists(unittest.TestCase):
    """Smoke-test the playlist/channel extractors against the live
    services, checking the result type, title/id and a minimum number of
    entries."""

    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')

    def test_dailymotion_playlist(self):
        dl = FakeYDL()
        ie = DailymotionPlaylistIE(dl)
        result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'SPORT')
        self.assertTrue(len(result['entries']) > 20)

    def test_dailymotion_user(self):
        dl = FakeYDL()
        ie = DailymotionUserIE(dl)
        result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'Génération Quoi')
        self.assertTrue(len(result['entries']) >= 26)

    def test_vimeo_channel(self):
        dl = FakeYDL()
        ie = VimeoChannelIE(dl)
        result = ie.extract('http://vimeo.com/channels/tributes')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'Vimeo Tributes')
        self.assertTrue(len(result['entries']) > 24)

    def test_ustream_channel(self):
        dl = FakeYDL()
        ie = UstreamChannelIE(dl)
        result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], u'5124905')
        self.assertTrue(len(result['entries']) >= 11)

    def test_soundcloud_set(self):
        dl = FakeYDL()
        ie = SoundcloudSetIE(dl)
        result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'The Royal Concept EP')
        self.assertTrue(len(result['entries']) >= 6)

    def test_soundcloud_user(self):
        dl = FakeYDL()
        ie = SoundcloudUserIE(dl)
        result = ie.extract('https://soundcloud.com/the-concept-band')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], u'9615865')
        self.assertTrue(len(result['entries']) >= 12)

    def test_livestream_event(self):
        dl = FakeYDL()
        ie = LivestreamIE(dl)
        result = ie.extract('http://new.livestream.com/tedx/cityenglish')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'TEDCity2.0 (English)')
        self.assertTrue(len(result['entries']) >= 4)

    def test_nhl_videocenter(self):
        dl = FakeYDL()
        ie = NHLVideocenterIE(dl)
        result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], u'999')
        self.assertEqual(result['title'], u'Highlights')
        self.assertEqual(len(result['entries']), 12)

    def test_bambuser_channel(self):
        dl = FakeYDL()
        ie = BambuserChannelIE(dl)
        result = ie.extract('http://bambuser.com/channel/pixelversity')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'pixelversity')
        self.assertTrue(len(result['entries']) >= 60)

    def test_bandcamp_album(self):
        dl = FakeYDL()
        ie = BandcampAlbumIE(dl)
        result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], u'Nightmare Night EP')
        self.assertTrue(len(result['entries']) >= 4)
if __name__ == '__main__':
unittest.main()
-210
View File
@@ -1,210 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, md5
from youtube_dl.extractor import (
YoutubeIE,
DailymotionIE,
TEDIE,
)
class BaseTestSubtitles(unittest.TestCase):
    """Shared scaffolding for the per-site subtitle tests.

    Subclasses set ``url`` and ``IE`` and may override getSubtitles()
    when the extractor's result shape differs.
    """
    url = None  # video URL or id the subclass tests against
    IE = None  # extractor class under test

    def setUp(self):
        self.DL = FakeYDL()
        self.ie = self.IE(self.DL)

    def getInfoDict(self):
        info_dict = self.ie.extract(self.url)
        return info_dict

    def getSubtitles(self):
        info_dict = self.getInfoDict()
        return info_dict['subtitles']
class TestYoutubeSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against a live YouTube video."""
    url = 'QRS8MkLhQmM'
    IE = YoutubeIE

    def getSubtitles(self):
        # The YouTube extractor returns a list of info dicts; use the first.
        info_dict = self.getInfoDict()
        return info_dict[0]['subtitles']

    def test_youtube_no_writesubtitles(self):
        self.DL.params['writesubtitles'] = False
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_youtube_subtitles(self):
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')

    def test_youtube_subtitles_lang(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['it']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')

    def test_youtube_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 13)

    def test_youtube_subtitles_sbv_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'sbv'
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')

    def test_youtube_subtitles_vtt_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'vtt'
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '356cdc577fde0c6783b9b822e7206ff7')

    def test_youtube_list_subtitles(self):
        # Listing subtitles produces no info dict.
        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_youtube_automatic_captions(self):
        self.url = '8YoUxe5ncPo'
        self.DL.params['writeautomaticsub'] = True
        self.DL.params['subtitleslangs'] = ['it']
        subtitles = self.getSubtitles()
        self.assertTrue(subtitles['it'] is not None)

    def test_youtube_nosubtitles(self):
        self.DL.expect_warning(u'video doesn\'t have subtitles')
        self.url = 'sAjKT8FhjI8'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles), 0)

    def test_youtube_multiple_langs(self):
        self.url = 'QRS8MkLhQmM'
        self.DL.params['writesubtitles'] = True
        langs = ['it', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
class TestDailymotionSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against a live Dailymotion video."""
    url = 'http://www.dailymotion.com/video/xczg00'
    IE = DailymotionIE

    def test_no_writesubtitles(self):
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_subtitles(self):
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')

    def test_subtitles_lang(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['fr']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 5)

    def test_list_subtitles(self):
        # Listing subtitles produces no info dict.
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_automatic_captions(self):
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['writeautomaticsub'] = True
        self.DL.params['subtitleslang'] = ['en']
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) == 0)

    def test_nosubtitles(self):
        self.DL.expect_warning(u'video doesn\'t have subtitles')
        self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles), 0)

    def test_multiple_langs(self):
        self.DL.params['writesubtitles'] = True
        langs = ['es', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
class TestTedSubtitles(BaseTestSubtitles):
    """Subtitle extraction tests against a live TED talk."""
    url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
    IE = TEDIE

    def test_no_writesubtitles(self):
        subtitles = self.getSubtitles()
        self.assertEqual(subtitles, None)

    def test_subtitles(self):
        self.DL.params['writesubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d')

    def test_subtitles_lang(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitleslangs'] = ['fr']
        subtitles = self.getSubtitles()
        self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6')

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(len(subtitles.keys()), 28)

    def test_list_subtitles(self):
        # Listing subtitles produces no info dict.
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)

    def test_automatic_captions(self):
        self.DL.expect_warning(u'Automatic Captions not supported by this server')
        self.DL.params['writeautomaticsub'] = True
        self.DL.params['subtitleslang'] = ['en']
        subtitles = self.getSubtitles()
        self.assertTrue(len(subtitles.keys()) == 0)

    def test_multiple_langs(self):
        self.DL.params['writesubtitles'] = True
        langs = ['es', 'fr', 'de']
        self.DL.params['subtitleslangs'] = langs
        subtitles = self.getSubtitles()
        for lang in langs:
            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
if __name__ == '__main__':
unittest.main()
-181
View File
@@ -1,181 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import xml.etree.ElementTree
#from youtube_dl.utils import htmlentity_transform
from youtube_dl.utils import (
timeconvert,
sanitize_filename,
unescapeHTML,
orderedSet,
DateRange,
unified_strdate,
find_xpath_attr,
get_meta_content,
xpath_with_ns,
smuggle_url,
unsmuggle_url,
shell_quote,
encodeFilename,
)
if sys.version_info < (3, 0):
_compat_str = lambda b: b.decode('unicode-escape')
else:
_compat_str = lambda s: s
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = _compat_str('\xe4')
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
self.assertEqual(sanitize_filename(tests), tests)
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
def test_sanitize_ids(self):
    """Video IDs (is_id=True) must come through completely unmodified."""
    for video_id in ('_n_cd26wFpw', '_BD_eEpuzXw', 'N0Y__7-UOdI'):
        self.assertEqual(sanitize_filename(video_id, is_id=True), video_id)
def test_ordered_set(self):
    """orderedSet() drops duplicates while preserving first-seen order."""
    self.assertEqual(orderedSet([]), [])
    self.assertEqual(orderedSet([1]), [1])
    self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
    # keep the list ordered
    self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
    """unescapeHTML() must leave non-entity text such as '%20;' alone."""
    sample = _compat_str('%20;')
    self.assertEqual(unescapeHTML(sample), sample)
def test_daterange(self):
    """DateRange membership with closed, start-only and end-only bounds."""
    twentieth_century = DateRange("19000101", "20000101")
    self.assertFalse("17890714" in twentieth_century)
    common_era = DateRange("00010101")
    self.assertTrue("19690721" in common_era)
    first_millennium = DateRange(end="10000101")
    self.assertTrue("07110427" in first_millennium)
def test_unified_dates(self):
    """unified_strdate() normalizes assorted date formats to YYYYMMDD."""
    for raw, expected in (
        ('December 21, 2010', '20101221'),
        ('8/7/2009', '20090708'),
        ('Dec 14, 2012', '20121214'),
        ('2012/10/11 01:56:38 +0000', '20121011'),
    ):
        self.assertEqual(unified_strdate(raw), expected)
def test_find_xpath_attr(self):
    """find_xpath_attr() returns the first node with the given attribute value."""
    testxml = u'''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
</root>'''
    tree = xml.etree.ElementTree.fromstring(testxml)
    # No match at all yields None rather than raising.
    self.assertEqual(find_xpath_attr(tree, './/fourohfour', 'n', 'v'), None)
    self.assertEqual(find_xpath_attr(tree, './/node', 'x', 'a'), tree[1])
    self.assertEqual(find_xpath_attr(tree, './/node', 'y', 'c'), tree[2])
def test_meta_parser(self):
    """get_meta_content() must read <meta> tags in either attribute order."""
    testhtml = u'''
<head>
<meta name="description" content="foo &amp; bar">
<meta content='Plato' name='author'/>
</head>
'''
    # Entities in the content attribute are decoded.
    self.assertEqual(get_meta_content('description', testhtml), u'foo & bar')
    self.assertEqual(get_meta_content('author', testhtml), 'Plato')
def test_xpath_with_ns(self):
    """xpath_with_ns() expands 'prefix:tag' paths using a namespace map."""
    testxml = u'''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
    tree = xml.etree.ElementTree.fromstring(testxml)
    ns_map = {'media': 'http://example.com/'}
    find = lambda path: tree.find(xpath_with_ns(path, ns_map))
    self.assertTrue(find('media:song') is not None)
    self.assertEqual(find('media:song/media:author').text, u'The Author')
    self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
def test_smuggle_url(self):
    """smuggle_url()/unsmuggle_url() must round-trip arbitrary data, and
    unsmuggling a plain URL must yield (url, None)."""
    payload = {u"ö": u"ö", u"abc": [3]}
    plain_url = 'https://foo.bar/baz?x=y#a'
    smuggled = smuggle_url(plain_url, payload)
    recovered_url, recovered_data = unsmuggle_url(smuggled)
    self.assertEqual(plain_url, recovered_url)
    self.assertEqual(payload, recovered_data)
    # A URL without smuggled data comes back unchanged with data None.
    res_url, res_data = unsmuggle_url(plain_url)
    self.assertEqual(res_url, plain_url)
    self.assertEqual(res_data, None)
def test_shell_quote(self):
    """shell_quote() must POSIX-escape embedded single quotes."""
    cmd = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
    quoted = shell_quote(cmd)
    self.assertEqual(quoted, u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
-79
View File
@@ -1,79 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm
import io
import xml.etree.ElementTree
import youtube_dl.YoutubeDL
import youtube_dl.extractor
class YoutubeDL(youtube_dl.YoutubeDL):
    """YoutubeDL variant that redirects stderr output to the screen logger,
    so test output is captured on a single stream."""
    def __init__(self, *args, **kwargs):
        super(YoutubeDL, self).__init__(*args, **kwargs)
        # Replace the bound method so error messages go through to_screen.
        self.to_stderr = self.to_screen
# Download options: fetch only the annotations XML, skip the media itself.
params = get_params({
    'writeannotations': True,
    'skip_download': True,
    'writeinfojson': False,
    'format': 'flv',
})
# YouTube video used as the annotations fixture.
TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
# Annotation texts the downloaded XML is expected to contain.
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
class TestAnnotations(unittest.TestCase):
    """Live-network test: download a video's annotations and validate the XML."""
    def setUp(self):
        # Clear old files
        self.tearDown()
    def test_info_json(self):
        # NOTE(review): despite the name, this checks the annotations file.
        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
        ydl.download([TEST_ID])
        self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
        annoxml = None
        with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
            annoxml = xml.etree.ElementTree.parse(annof)
        self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
        root = annoxml.getroot()
        self.assertEqual(root.tag, 'document')
        annotationsTag = root.find('annotations')
        self.assertEqual(annotationsTag.tag, 'annotations')
        annotations = annotationsTag.findall('annotation')
        # Not all the annotations have TEXT children and the annotations are returned unsorted.
        for a in annotations:
            self.assertEqual(a.tag, 'annotation')
            if a.get('type') == 'text':
                textTag = a.find('TEXT')
                text = textTag.text
                self.assertTrue(text in expected)  # assertIn only added in python 2.7
                # Remove the first occurrence; there could be more than one annotation with the same text.
                expected.remove(text)
        # We should have seen (and removed) all the expected annotation texts.
        self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
    def tearDown(self):
        # Best-effort removal of the downloaded annotations file.
        try_rm(ANNOTATIONS_FILE)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
-74
View File
@@ -1,74 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params
import io
import json
import youtube_dl.YoutubeDL
import youtube_dl.extractor
class YoutubeDL(youtube_dl.YoutubeDL):
    """YoutubeDL variant that redirects stderr output to the screen logger,
    so test output is captured on a single stream."""
    def __init__(self, *args, **kwargs):
        super(YoutubeDL, self).__init__(*args, **kwargs)
        # Replace the bound method so error messages go through to_screen.
        self.to_stderr = self.to_screen
# Download options: write metadata sidecar files, skip the media itself.
params = get_params({
    'writeinfojson': True,
    'skip_download': True,
    'writedescription': True,
})
# Canonical youtube-dl test video.
TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
# Exact description text expected in both the JSON and the .description file.
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
This is a test video for youtube-dl.
For more information, contact phihag@phihag.de .'''
class TestInfoJSON(unittest.TestCase):
    """Live-network test: .info.json and .description sidecar files."""
    def setUp(self):
        # Clear old files
        self.tearDown()
    def test_info_json(self):
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
        ydl.download([TEST_ID])
        # The metadata JSON must exist and carry the known field values.
        self.assertTrue(os.path.exists(INFO_JSON_FILE))
        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
            jd = json.load(jsonf)
        self.assertEqual(jd['upload_date'], u'20121002')
        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
        self.assertEqual(jd['id'], TEST_ID)
        self.assertEqual(jd['extractor'], 'youtube')
        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
        # The description file must match the JSON description byte-for-byte.
        self.assertTrue(os.path.exists(DESCRIPTION_FILE))
        with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
            descr = descf.read()
        self.assertEqual(descr, EXPECTED_DESCRIPTION)
    def tearDown(self):
        # Remove both sidecar files between runs.
        if os.path.exists(INFO_JSON_FILE):
            os.remove(INFO_JSON_FILE)
        if os.path.exists(DESCRIPTION_FILE):
            os.remove(DESCRIPTION_FILE)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
-111
View File
@@ -1,111 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import (
YoutubeUserIE,
YoutubePlaylistIE,
YoutubeIE,
YoutubeChannelIE,
YoutubeShowIE,
)
class TestYoutubeLists(unittest.TestCase):
    """Live-network tests for YouTube playlist/channel/user/show extraction."""
    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')
    def test_youtube_playlist(self):
        # A small fixed playlist must yield its exact video IDs in order.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'ytdl test PL')
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
    def test_youtube_playlist_noplaylist(self):
        # With --no-playlist, a watch URL carrying &list= resolves to one video.
        dl = FakeYDL()
        dl.params['noplaylist'] = True
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(YoutubeIE()._extract_id(result['url']), 'FXxLjLQi3Fg')
    def test_issue_673(self):
        # Bare playlist IDs (no URL) must be accepted.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('PLBB231211A4F62143')
        self.assertTrue(len(result['entries']) > 25)
    def test_youtube_playlist_long(self):
        # Pagination: a playlist with hundreds of entries must be fully walked.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
        self.assertIsPlaylist(result)
        self.assertTrue(len(result['entries']) >= 799)
    def test_youtube_playlist_with_deleted(self):
        # 651 - deleted videos must be skipped, not returned as entries.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        self.assertFalse('pElCt5oNDuI' in ytie_results)
        self.assertFalse('KdPEApIVdWM' in ytie_results)
    def test_youtube_playlist_empty(self):
        # An empty playlist is still a playlist, with zero entries.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
        self.assertIsPlaylist(result)
        self.assertEqual(len(result['entries']), 0)
    def test_youtube_course(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        entries = result['entries']
        self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
        self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
    def test_youtube_channel(self):
        dl = FakeYDL()
        ie = YoutubeChannelIE(dl)
        # test paginated channel
        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
        self.assertTrue(len(result['entries']) > 90)
        # test autogenerated channel
        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
        self.assertTrue(len(result['entries']) >= 18)
    def test_youtube_user(self):
        dl = FakeYDL()
        ie = YoutubeUserIE(dl)
        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
        self.assertTrue(len(result['entries']) >= 320)
    def test_youtube_safe_search(self):
        # Videos filtered out by safe search must not appear as entries.
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
        self.assertEqual(len(result['entries']), 2)
    def test_youtube_show(self):
        dl = FakeYDL()
        ie = YoutubeShowIE(dl)
        result = ie.extract('http://www.youtube.com/show/airdisasters')
        self.assertTrue(len(result) >= 3)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
-81
View File
@@ -1,81 +0,0 @@
#!/usr/bin/env python
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
# Each entry: (player URL, file type, length of the test signature taken from
# string.printable, expected descrambled signature for that input).
_TESTS = [
    (
        u'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
        u'js',
        86,
        u'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
    ),
    (
        u'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
        u'js',
        85,
        u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
    ),
    (
        u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf',
        u'swf',
        82,
        u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321'
    ),
]
class TestSignature(unittest.TestCase):
    """Container class; the actual test methods are attached by make_tfunc()."""
    def setUp(self):
        """Ensure the testdata cache directory exists next to this file."""
        test_dir = os.path.dirname(os.path.abspath(__file__))
        testdata_dir = os.path.join(test_dir, 'testdata')
        if not os.path.exists(testdata_dir):
            os.mkdir(testdata_dir)
        self.TESTDATA_DIR = testdata_dir
def make_tfunc(url, stype, sig_length, expected_sig):
    """Generate a signature test for the given player *url* and attach it
    to TestSignature under a name derived from the player version."""
    basename = url.rpartition('/')[2]
    # The player version is the last dash-separated token of the file name.
    m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
    assert m, '%r should follow URL format' % basename
    test_id = m.group(1)
    def test_func(self):
        # Download the player file once and cache it under testdata/.
        fn = os.path.join(self.TESTDATA_DIR, basename)
        if not os.path.exists(fn):
            compat_urlretrieve(url, fn)
        ie = YoutubeIE()
        if stype == 'js':
            with io.open(fn, encoding='utf-8') as testf:
                jscode = testf.read()
            func = ie._parse_sig_js(jscode)
        else:
            assert stype == 'swf'
            with open(fn, 'rb') as testf:
                swfcode = testf.read()
            func = ie._parse_sig_swf(swfcode)
        # Feed a known scrambled signature and compare the descrambled result.
        src_sig = compat_str(string.printable[:sig_length])
        got_sig = func(src_sig)
        self.assertEqual(got_sig, expected_sig)
    test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
    setattr(TestSignature, test_func.__name__, test_func)
# Generate one test method on TestSignature per entry in _TESTS.
for test_spec in _TESTS:
    make_tfunc(*test_spec)
if __name__ == '__main__':
    unittest.main()
-8
View File
@@ -1,8 +0,0 @@
[tox]
envlist = py26,py27,py33
[testenv]
deps =
nose
coverage
commands = nosetests --verbose {posargs:test} # --with-coverage --cover-package=youtube_dl --cover-html
# test.test_download:TestDownload.test_NowVideo
+1
View File
@@ -0,0 +1 @@
2021.12.17
+14693
View File
File diff suppressed because it is too large Load Diff
+13718
View File
File diff suppressed because it is too large Load Diff
-89
View File
@@ -1,89 +0,0 @@
#!/usr/bin/env python
import sys, os
import json, hashlib
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
def rsa_verify(message, signature, key):
    """Verify an RSA PKCS#1 v1.5 SHA-256 signature.

    message   -- the signed data, as a byte string
    signature -- the RSA signature, as a hexadecimal string
    key       -- (modulus, public exponent) tuple
    Returns True if the signature is valid, False otherwise.
    """
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        # Encode byte-string literals so this works on both Python 2 and 3.
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    # Use isinstance instead of exact-type comparison (accepts bytes subclasses).
    assert isinstance(message, type(b('')))
    # Octet length of the modulus == size of the signature block.
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    # RSA: m = s^e mod n
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    # Left-pad with zero bytes back to the full block size.
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    # EMSA-PKCS1-v1_5 layout: 0x00 0x01 <FF padding> 0x00 <DigestInfo> <digest>
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00')) + 1:]
    # ASN.1 DigestInfo prefix identifying SHA-256.
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    # Valid iff the remaining bytes equal the SHA-256 digest of the message.
    return signature == sha256(message).digest()
# One-time interactive notice: the distribution method changed.
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
try:
    raw_input()
except NameError:  # Python 3
    input()
# The running script replaces itself in place.
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
# RSA public key (modulus, exponent) used to verify versions.json signatures.
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)
# Fetch and parse the signed version manifest.
try:
    versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:  # NOTE(review): bare except also swallows KeyboardInterrupt; kept as-is in this frozen script
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
# The signature covers the canonical (sorted-keys) JSON without the signature field.
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
version = versions_info['versions'][versions_info['latest']]
# Download the new binary; version['bin'] is (url, sha256-hex).
try:
    urlh = compat_urllib_request.urlopen(version['bin'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
# Overwrite the running script with the verified download.
try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
BIN
View File
Binary file not shown.
-683
View File
@@ -1,683 +0,0 @@
import os
import re
import subprocess
import sys
import time
from .utils import (
compat_urllib_error,
compat_urllib_request,
ContentTooShortError,
determine_ext,
encodeFilename,
format_bytes,
sanitize_open,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
"""
# Options dictionary (see class docstring); assigned per-instance in __init__.
params = None
def __init__(self, ydl, params):
    """Create a FileDownloader object with the given options."""
    self.ydl = ydl
    # Progress hook callables, invoked with a status dict by _hook_progress().
    self._progress_hooks = []
    self.params = params
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
def to_screen(self, *args, **kargs):
    """Forward informational output to the owning YoutubeDL instance."""
    self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
    # NOTE(review): forwards to to_screen, not ydl.to_stderr — presumably so
    # all download output stays on one stream; confirm before changing.
    self.ydl.to_screen(message)
def to_console_title(self, message):
    """Set the terminal/console window title via the owning YoutubeDL."""
    self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
    """Delegate error/exception bookkeeping to the owning YoutubeDL."""
    self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
    """Delegate warning output to the owning YoutubeDL."""
    self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
    """Delegate error output to the owning YoutubeDL."""
    self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == u'-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + u'.part'
def undo_temp_name(self, filename):
if filename.endswith(u'.part'):
return filename[:-len(u'.part')]
return filename
def try_rename(self, old_filename, new_filename):
    """Rename old_filename to new_filename, reporting (not raising) on failure."""
    # A no-op rename cannot fail and needs no syscall.
    if old_filename == new_filename:
        return
    try:
        os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
    except (IOError, OSError):
        self.report_error(u'unable to rename file')
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except:
pass
return filetime
def report_destination(self, filename):
    """Report destination filename."""
    self.to_screen(u'[download] Destination: ' + filename)
def report_progress(self, percent, data_len_str, speed, eta):
    """Report download progress on screen and in the console title.

    Any of percent/speed/eta may be None, in which case a placeholder
    string is shown instead.
    """
    if self.params.get('noprogress', False):
        return
    # ANSI clear-to-end-of-line, only on a real (non-Windows) terminal.
    clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
    if eta is not None:
        eta_str = self.format_eta(eta)
    else:
        eta_str = 'Unknown ETA'
    if percent is not None:
        percent_str = self.format_percent(percent)
    else:
        percent_str = 'Unknown %'
    speed_str = self.format_speed(speed)
    if self.params.get('progress_with_newline', False):
        self.to_screen(u'[download] %s of %s at %s ETA %s' %
            (percent_str, data_len_str, speed_str, eta_str))
    else:
        # Leading \r overwrites the current line instead of scrolling.
        self.to_screen(u'\r%s[download] %s of %s at %s ETA %s' %
            (clear_line, percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
    self.to_console_title(u'youtube-dl - %s of %s at %s ETA %s' %
        (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
def report_resuming_byte(self, resume_len):
    """Report attempt to resume at given byte."""
    self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
    """Report retry in case of HTTP error 5xx"""
    self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
    """Report file has already been fully downloaded."""
    try:
        self.to_screen(u'[download] %s has already been downloaded' % file_name)
    except UnicodeEncodeError:
        # The console encoding may be unable to represent the file name.
        self.to_screen(u'[download] The file has already been downloaded')
def report_unable_to_resume(self):
    """Report it was impossible to resume download."""
    self.to_screen(u'[download] Unable to resume')
def report_finish(self, data_len_str, tot_time):
    """Report download finished."""
    if self.params.get('noprogress', False):
        self.to_screen(u'[download] Download completed')
    else:
        clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
        self.to_screen(u'\r%s[download] 100%% of %s in %s' %
            (clear_line, data_len_str, self.format_seconds(tot_time)))
def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url, live):
    """Download an RTMP stream by driving the external rtmpdump binary.

    Parses rtmpdump's stderr to produce progress reports, resumes partial
    downloads (exit codes 1/2), and returns True on success, False otherwise.
    """
    def run_rtmpdump(args):
        # Run rtmpdump once, relaying its progress output; returns its exit code.
        start = time.time()
        resume_percent = None
        resume_downloaded_data_len = None
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        cursor_in_new_line = True
        proc_stderr_closed = False
        while not proc_stderr_closed:
            # read line from stderr
            line = u''
            while True:
                char = proc.stderr.read(1)
                if not char:
                    proc_stderr_closed = True
                    break
                if char in [b'\r', b'\n']:
                    break
                line += char.decode('ascii', 'replace')
            if not line:
                # proc_stderr_closed is True
                continue
            # rtmpdump progress lines look like "123.456 kB / 12.34 sec (56.7%)"
            mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
            if mobj:
                downloaded_data_len = int(float(mobj.group(1))*1024)
                percent = float(mobj.group(2))
                if not resume_percent:
                    # First progress line: remember where this run started.
                    resume_percent = percent
                    resume_downloaded_data_len = downloaded_data_len
                eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
                speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
                data_len = None
                if percent > 0:
                    # Extrapolate the total size from bytes-so-far and percent.
                    data_len = int(downloaded_data_len * 100 / percent)
                data_len_str = u'~' + format_bytes(data_len)
                self.report_progress(percent, data_len_str, speed, eta)
                cursor_in_new_line = False
                self._hook_progress({
                    'downloaded_bytes': downloaded_data_len,
                    'total_bytes': data_len,
                    'tmpfilename': tmpfilename,
                    'filename': filename,
                    'status': 'downloading',
                    'eta': eta,
                    'speed': speed,
                })
            elif self.params.get('verbose', False):
                # Relay non-progress rtmpdump output in verbose mode.
                if not cursor_in_new_line:
                    self.to_screen(u'')
                    cursor_in_new_line = True
                self.to_screen(u'[rtmpdump] '+line)
        proc.wait()
        if not cursor_in_new_line:
            self.to_screen(u'')
        return proc.returncode
    self.report_destination(filename)
    tmpfilename = self.temp_name(filename)
    test = self.params.get('test', False)
    # Check for rtmpdump first
    try:
        subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
    except (OSError, IOError):
        self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
        return False
    # Download using rtmpdump. rtmpdump returns exit code 2 when
    # the connection was interrumpted and resuming appears to be
    # possible. This is part of rtmpdump's normal usage, AFAIK.
    basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
    if player_url is not None:
        basic_args += ['--swfVfy', player_url]
    if page_url is not None:
        basic_args += ['--pageUrl', page_url]
    if play_path is not None:
        basic_args += ['--playpath', play_path]
    if tc_url is not None:
        basic_args += ['--tcUrl', url]
    if test:
        # Stop after one segment when only testing the download.
        basic_args += ['--stop', '1']
    if live:
        basic_args += ['--live']
    args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
    if sys.platform == 'win32' and sys.version_info < (3, 0):
        # Windows subprocess module does not actually support Unicode
        # on Python 2.x
        # See http://stackoverflow.com/a/9951851/35070
        subprocess_encoding = sys.getfilesystemencoding()
        args = [a.encode(subprocess_encoding, 'ignore') for a in args]
    else:
        subprocess_encoding = None
    if self.params.get('verbose', False):
        if subprocess_encoding:
            str_args = [
                a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                for a in args]
        else:
            str_args = args
        try:
            import pipes
            shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
        except ImportError:
            shell_quote = repr
        self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))
    retval = run_rtmpdump(args)
    # Exit codes 1/2 indicate an interrupted, possibly resumable download.
    while (retval == 2 or retval == 1) and not test:
        prevsize = os.path.getsize(encodeFilename(tmpfilename))
        self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
        time.sleep(5.0) # This seems to be needed
        retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
        cursize = os.path.getsize(encodeFilename(tmpfilename))
        if prevsize == cursize and retval == 1:
            # No progress was made; give up rather than loop forever.
            break
        # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
        if prevsize == cursize and retval == 2 and cursize > 1024:
            self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
            retval = 0
            break
    if retval == 0 or (test and retval == 2):
        fsize = os.path.getsize(encodeFilename(tmpfilename))
        self.to_screen(u'[rtmpdump] %s bytes' % fsize)
        self.try_rename(tmpfilename, filename)
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })
        return True
    else:
        self.to_stderr(u"\n")
        self.report_error(u'rtmpdump exited with code %d' % retval)
        return False
def _download_with_mplayer(self, filename, url):
    """Download an MMS/RTSP stream by dumping it to disk with mplayer.

    Returns True on success, False when mplayer is missing or fails.
    """
    self.report_destination(filename)
    tmpfilename = self.temp_name(filename)
    args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
    # Check for mplayer first
    try:
        subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
    except (OSError, IOError):
        self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0] )
        return False
    # Download using mplayer.
    retval = subprocess.call(args)
    if retval == 0:
        fsize = os.path.getsize(encodeFilename(tmpfilename))
        self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
        self.try_rename(tmpfilename, filename)
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })
        return True
    else:
        self.to_stderr(u"\n")
        self.report_error(u'mplayer exited with code %d' % retval)
        return False
def _download_m3u8_with_ffmpeg(self, filename, url):
    """Download an m3u8 (HLS) manifest by remuxing it to mp4 with avconv/ffmpeg.

    Returns True on success, False when no suitable binary is found or the
    remux fails.
    """
    self.report_destination(filename)
    tmpfilename = self.temp_name(filename)
    args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
        '-bsf:a', 'aac_adtstoasc', tmpfilename]
    # Prefer avconv, fall back to ffmpeg; both accept the same arguments here.
    for program in ['avconv', 'ffmpeg']:
        try:
            subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
            break
        except (OSError, IOError):
            pass
    else:
        self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
        # BUG FIX: previously fell through and tried to execute a missing binary.
        return False
    cmd = [program] + args
    retval = subprocess.call(cmd)
    if retval == 0:
        fsize = os.path.getsize(encodeFilename(tmpfilename))
        # BUG FIX: report the program name; args[0] was the '-y' flag.
        self.to_screen(u'\r[%s] %s bytes' % (program, fsize))
        self.try_rename(tmpfilename, filename)
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })
        return True
    else:
        self.to_stderr(u"\n")
        self.report_error(u'ffmpeg exited with code %d' % retval)
        return False
def _do_download(self, filename, info_dict):
    """Download info_dict['url'] to *filename* over HTTP(S).

    Dispatches rtmp/mms/rtsp/m3u8 URLs to their dedicated helpers,
    otherwise performs a resumable HTTP download with retries.
    Returns True on success, False on failure; may raise
    ContentTooShortError if fewer bytes than reported were received.
    """
    url = info_dict['url']

    # Check file already present
    if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
        self.report_file_already_downloaded(filename)
        self._hook_progress({
            'filename': filename,
            'status': 'finished',
            'total_bytes': os.path.getsize(encodeFilename(filename)),
        })
        return True

    # Attempt to download using rtmpdump
    if url.startswith('rtmp'):
        return self._download_with_rtmpdump(filename, url,
                                            info_dict.get('player_url', None),
                                            info_dict.get('page_url', None),
                                            info_dict.get('play_path', None),
                                            info_dict.get('tc_url', None),
                                            info_dict.get('rtmp_live', False))

    # Attempt to download using mplayer
    if url.startswith('mms') or url.startswith('rtsp'):
        return self._download_with_mplayer(filename, url)

    # m3u8 manifest are downloaded with ffmpeg
    if determine_ext(url) == u'm3u8':
        return self._download_m3u8_with_ffmpeg(filename, url)

    tmpfilename = self.temp_name(filename)
    stream = None

    # Do not include the Accept-Encoding header
    headers = {'Youtubedl-no-compression': 'True'}
    if 'user_agent' in info_dict:
        headers['Youtubedl-user-agent'] = info_dict['user_agent']
    # basic_request has no Range header; used to re-probe the full length
    # when a resume attempt gets HTTP 416 below.
    basic_request = compat_urllib_request.Request(url, None, headers)
    request = compat_urllib_request.Request(url, None, headers)

    # In test mode only the first ~10 KB are fetched.
    if self.params.get('test', False):
        request.add_header('Range','bytes=0-10240')

    # Establish possible resume length
    if os.path.isfile(encodeFilename(tmpfilename)):
        resume_len = os.path.getsize(encodeFilename(tmpfilename))
    else:
        resume_len = 0

    open_mode = 'wb'
    if resume_len != 0:
        if self.params.get('continuedl', False):
            self.report_resuming_byte(resume_len)
            request.add_header('Range','bytes=%d-' % resume_len)
            open_mode = 'ab'
        else:
            resume_len = 0

    count = 0
    retries = self.params.get('retries', 0)
    while count <= retries:
        # Establish connection
        try:
            # NOTE(review): the 'urlhandle' assignment is immediately
            # overwritten by the urlopen() call below, so the cached handle
            # is never actually used — looks vestigial; confirm before
            # relying on it.
            if count == 0 and 'urlhandle' in info_dict:
                data = info_dict['urlhandle']
            data = compat_urllib_request.urlopen(request)
            break
        except (compat_urllib_error.HTTPError, ) as err:
            if (err.code < 500 or err.code >= 600) and err.code != 416:
                # Unexpected HTTP error
                raise
            elif err.code == 416:
                # Unable to resume (requested range not satisfiable)
                try:
                    # Open the connection again without the range header
                    data = compat_urllib_request.urlopen(basic_request)
                    content_length = data.info()['Content-Length']
                except (compat_urllib_error.HTTPError, ) as err:
                    if err.code < 500 or err.code >= 600:
                        raise
                else:
                    # Examine the reported length
                    if (content_length is not None and
                            (resume_len - 100 < int(content_length) < resume_len + 100)):
                        # The file had already been fully downloaded.
                        # Explanation to the above condition: in issue #175 it was revealed that
                        # YouTube sometimes adds or removes a few bytes from the end of the file,
                        # changing the file size slightly and causing problems for some users. So
                        # I decided to implement a suggested change and consider the file
                        # completely downloaded if the file size differs less than 100 bytes from
                        # the one in the hard drive.
                        self.report_file_already_downloaded(filename)
                        self.try_rename(tmpfilename, filename)
                        self._hook_progress({
                            'filename': filename,
                            'status': 'finished',
                        })
                        return True
                    else:
                        # The length does not match, we start the download over
                        self.report_unable_to_resume()
                        open_mode = 'wb'
                        break
        # Retry
        count += 1
        if count <= retries:
            self.report_retry(count, retries)

    if count > retries:
        self.report_error(u'giving up after %s retries' % retries)
        return False

    data_len = data.info().get('Content-length', None)
    if data_len is not None:
        data_len = int(data_len) + resume_len
        min_data_len = self.params.get("min_filesize", None)
        max_data_len = self.params.get("max_filesize", None)
        if min_data_len is not None and data_len < min_data_len:
            self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
            return False
        if max_data_len is not None and data_len > max_data_len:
            self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
            return False

    data_len_str = format_bytes(data_len)
    byte_counter = 0 + resume_len
    block_size = self.params.get('buffersize', 1024)
    start = time.time()
    while True:
        # Download and write
        before = time.time()
        data_block = data.read(block_size)
        after = time.time()
        if len(data_block) == 0:
            break
        byte_counter += len(data_block)

        # Open file just in time
        if stream is None:
            try:
                (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                assert stream is not None
                filename = self.undo_temp_name(tmpfilename)
                self.report_destination(filename)
            except (OSError, IOError) as err:
                self.report_error(u'unable to open for writing: %s' % str(err))
                return False
        try:
            stream.write(data_block)
        except (IOError, OSError) as err:
            self.to_stderr(u"\n")
            self.report_error(u'unable to write data: %s' % str(err))
            return False
        # Adapt the block size to the measured throughput unless disabled.
        if not self.params.get('noresizebuffer', False):
            block_size = self.best_block_size(after - before, len(data_block))

        # Progress message
        speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
        if data_len is None:
            eta = percent = None
        else:
            percent = self.calc_percent(byte_counter, data_len)
            eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
        self.report_progress(percent, data_len_str, speed, eta)

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': data_len,
            'tmpfilename': tmpfilename,
            'filename': filename,
            'status': 'downloading',
            'eta': eta,
            'speed': speed,
        })

        # Apply rate limit
        self.slow_down(start, byte_counter - resume_len)

    if stream is None:
        self.to_stderr(u"\n")
        self.report_error(u'Did not get any data blocks')
        return False
    stream.close()
    self.report_finish(data_len_str, (time.time() - start))
    if data_len is not None and byte_counter != data_len:
        raise ContentTooShortError(byte_counter, int(data_len))
    self.try_rename(tmpfilename, filename)

    # Update file modification time
    if self.params.get('updatetime', True):
        info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

    self._hook_progress({
        'downloaded_bytes': byte_counter,
        'total_bytes': byte_counter,
        'filename': filename,
        'status': 'finished',
    })

    return True
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
    """Register *ph* to be called on download progress.

    The hook receives a dictionary with the entries:
    * filename: The final filename
    * status: One of "downloading" and "finished"
    It can also have some of the following entries:
    * downloaded_bytes: Bytes on disks
    * total_bytes: Total bytes, None if unknown
    * tmpfilename: The filename we're currently writing to
    * eta: The estimated time in seconds, None if unknown
    * speed: The download speed in bytes/second, None if unknown

    Hooks are guaranteed to be called at least once (with status
    "finished") if the download is successful.
    """
    self._progress_hooks.append(ph)
-4
View File
@@ -1,4 +0,0 @@
# Legacy file for backwards compatibility, use youtube_dl.extractor instead!
from .extractor.common import InfoExtractor, SearchInfoExtractor
from .extractor import gen_extractors, get_info_extractor
-511
View File
@@ -1,511 +0,0 @@
import os
import subprocess
import sys
import time
from .utils import (
compat_subprocess_get_DEVNULL,
encodeFilename,
PostProcessingError,
shell_quote,
subtitles_filename,
)
class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    # Downloader this post processor is attached to (None until set).
    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        This method returns a tuple, the first element of which describes
        whether the original file should be kept (i.e. not deleted - None for
        no preference), and the second of which is the updated information.

        In addition, this method may raise a PostProcessingError
        exception if post processing fails.
        """
        # Default: no preference about keeping the file, info unchanged.
        return None, information
# Raised by FFmpegPostProcessor when an ffmpeg/avconv invocation fails or
# neither binary is available.
class FFmpegPostProcessorError(PostProcessingError):
    pass
# Raised by FFmpegExtractAudioPP.run_ffmpeg when the audio conversion step
# fails (wraps an underlying FFmpegPostProcessorError message).
class AudioConversionError(PostProcessingError):
    pass
class FFmpegPostProcessor(PostProcessor):
    """Base class for post processors that shell out to ffmpeg/avconv."""

    def __init__(self, downloader=None):
        PostProcessor.__init__(self, downloader)
        # Maps program name -> program name if runnable, False otherwise.
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        """Probe which of the ffmpeg/avconv family binaries can be run."""
        def executable(exe):
            try:
                proc = subprocess.Popen(
                    [exe, '-version'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                proc.communicate()
            except OSError:
                return False
            return exe
        candidates = ('avprobe', 'avconv', 'ffmpeg', 'ffprobe')
        return dict((name, executable(name)) for name in candidates)

    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
        """Run avconv/ffmpeg over *input_paths*, writing *out_path*.

        Raises FFmpegPostProcessorError if no binary is available or the
        process exits with a non-zero status.
        """
        program = self._exes['avconv'] or self._exes['ffmpeg']
        if not program:
            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')

        cmd = [program, '-y']
        for path in input_paths:
            cmd.extend(['-i', encodeFilename(path)])
        cmd.extend(opts)
        cmd.append(encodeFilename(self._ffmpeg_filename_argument(out_path)))

        if self._downloader.params.get('verbose', False):
            self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _stdout, stderr_output = proc.communicate()
        if proc.returncode != 0:
            # Surface only the last stderr line, which carries the error.
            message = stderr_output.decode('utf-8', 'replace').strip().split('\n')[-1]
            raise FFmpegPostProcessorError(message)

    def run_ffmpeg(self, path, out_path, opts):
        """Convenience wrapper for a single input file."""
        self.run_ffmpeg_multiple_files([path], out_path, opts)

    def _ffmpeg_filename_argument(self, fn):
        # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127
        # for details; prefix leading-dash names with './' instead.
        return u'./' + fn if fn.startswith(u'-') else fn
class FFmpegExtractAudioPP(FFmpegPostProcessor):
    """Extract (and, if needed, transcode) the audio track of a downloaded file."""

    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites

    def get_audio_codec(self, path):
        """Return the audio codec name of *path* via avprobe/ffprobe, or None.

        Raises PostProcessingError if neither probe binary is available.
        """
        if not self._exes['ffprobe'] and not self._exes['avprobe']:
            raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        # Scan the probe output: remember the last codec_name seen, and
        # return it once an audio stream section is confirmed.
        audio_codec = None
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        """Transcode *path* to *out_path* with audio codec *codec* (None = default).

        Raises AudioConversionError on failure or missing binaries.
        """
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts  # -vn: drop the video stream
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.msg)

    def run(self, information):
        """Extract the audio of information['filepath'].

        Returns (keep_original_file, information).
        """
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                # Lossless, but in another container
                acodec = 'copy'
                extension = 'm4a'
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    # Quality below 10 is a VBR quality level; otherwise a bitrate.
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                # The opus codec doesn't support the -aq option
                if int(self._preferredquality) < 10 and extension != 'opus':
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.')  # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension

        # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
        if new_path == path:
            self._nopostoverwrites = True

        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except Exception as e:
            # FIX: was a bare "except:", which also trapped KeyboardInterrupt
            # and SystemExit; the dated sys.exc_info() unpacking is replaced
            # by the "as e" form.
            if isinstance(e, AudioConversionError):
                msg = u'audio conversion failed: ' + e.msg
            else:
                msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
            raise PostProcessingError(msg)

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except Exception:
                # FIX: was a bare "except:"; best-effort behavior preserved.
                self._downloader.report_warning(u'Cannot update utime of audio file')

        information['filepath'] = new_path
        return self._nopostoverwrites, information
class FFmpegVideoConvertor(FFmpegPostProcessor):
    """Re-container a downloaded video into the preferred format."""

    def __init__(self, downloader=None, preferedformat=None):
        super(FFmpegVideoConvertor, self).__init__(downloader)
        self._preferedformat = preferedformat

    def run(self, information):
        """Convert information['filepath'] to self._preferedformat.

        Returns (keep_original_file, information).
        """
        path = information['filepath']
        target_format = self._preferedformat
        if information['ext'] == target_format:
            # Already in the requested format: keep the file untouched.
            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, target_format))
            return True, information

        prefix, sep, _old_ext = path.rpartition(u'.')
        outpath = prefix + sep + target_format
        self._downloader.to_screen(
            u'[ffmpeg] Converting video from %s to %s, Destination: %s'
            % (information['ext'], target_format, outpath))
        self.run_ffmpeg(path, outpath, [])

        information['filepath'] = outpath
        information['format'] = target_format
        information['ext'] = target_format
        return False, information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
    # Embeds downloaded subtitle files as mov_text streams in an mp4 file.
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    # Mapping of ISO 639-1 (two-letter) codes to ISO 639-2/T (three-letter)
    # codes, used for the per-stream "language" metadata tag.
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    def __init__(self, downloader=None, subtitlesformat='srt'):
        super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
        # File extension of the downloaded subtitle files (e.g. 'srt').
        self._subformat = subtitlesformat

    @classmethod
    def _conver_lang_code(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters are looked up; unknown codes -> None.
        return cls._lang_map.get(code[:2])

    def run(self, information):
        """Mux the downloaded subtitle files into the mp4 video.

        Returns (keep_original_file, information); only mp4 is supported.
        """
        if information['ext'] != u'mp4':
            self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
            return True, information
        if not information.get('subtitles'):
            self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed')
            return True, information

        sub_langs = [key for key in information['subtitles']]
        filename = information['filepath']
        # Input 0 is the video; inputs 1..n are the subtitle files.
        input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs]

        # Keep the first two streams of the video (presumably video+audio —
        # streams beyond 0:1 would be dropped; confirm for multi-stream files)
        # and copy them without re-encoding.
        opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
        for (i, lang) in enumerate(sub_langs):
            # Map each subtitle input and convert it to the mov_text codec.
            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
            lang_code = self._conver_lang_code(lang)
            if lang_code is not None:
                opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
        opts.extend(['-f', 'mp4'])

        temp_filename = filename + u'.temp'
        self._downloader.to_screen(u'[ffmpeg] Embedding subtitles in \'%s\'' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        # Replace the original file with the muxed result.
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))

        return True, information
class FFmpegMetadataPP(FFmpegPostProcessor):
    """Write title/date/artist metadata tags into the downloaded file."""

    def run(self, info):
        """Add metadata gathered from *info* to info['filepath'] in place.

        Returns (keep_original_file, info).
        """
        # Collect the tags we can derive from the info dict.
        metadata = {}
        if info.get('title') is not None:
            metadata['title'] = info['title']
        if info.get('upload_date') is not None:
            metadata['date'] = info['upload_date']
        if info.get('uploader') is not None:
            metadata['artist'] = info['uploader']
        elif info.get('uploader_id') is not None:
            metadata['artist'] = info['uploader_id']

        if not metadata:
            self._downloader.to_screen(u'[ffmpeg] There isn\'t any metadata to add')
            return True, info

        filename = info['filepath']
        ext = os.path.splitext(filename)[1][1:]
        temp_filename = filename + u'.temp'

        options = ['-c', 'copy']
        for name, value in metadata.items():
            options.extend(['-metadata', '%s=%s' % (name, value)])
        options.extend(['-f', ext])

        self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
        self.run_ffmpeg(filename, temp_filename, options)
        # Swap the tagged copy in for the original file.
        os.remove(encodeFilename(filename))
        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
        return True, info
File diff suppressed because it is too large Load Diff
-699
View File
@@ -1,699 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Names of the people who have contributed code to this project.
__authors__  = (
    'Ricardo Garcia Gonzalez',
    'Danny Colligan',
    'Benjamin Johnson',
    'Vasyl\' Vavrychuk',
    'Witold Baryluk',
    'Paweł Paprota',
    'Gergely Imreh',
    'Rogério Brito',
    'Philipp Hagemeister',
    'Sören Schulze',
    'Kevin Ngo',
    'Ori Avtalion',
    'shizeeg',
    'Filippo Valsorda',
    'Christian Albrecht',
    'Dave Vasilevsky',
    'Jaime Marquínez Ferrándiz',
    'Jeff Crouse',
    'Osama Khalid',
    'Michael Walter',
    'M. Yasoob Ullah Khalid',
    'Julien Fraichard',
    'Johny Mo Swag',
    'Axel Noack',
    'Albert Kim',
    'Pierre Rudloff',
    'Huarong Huo',
    'Ismael Mejía',
    'Steffan \'Ruirize\' James',
    'Andras Elso',
    'Jelle van der Waa',
    'Marcin Cieślak',
    'Anton Larionov',
    'Takuya Tsuchida',
)

# The code is released into the public domain.
__license__ = 'Public Domain'
import codecs
import getpass
import optparse
import os
import random
import re
import shlex
import subprocess
import sys
from .utils import (
compat_print,
DateRange,
decodeOption,
determine_ext,
DownloadError,
get_cachedir,
MaxDownloadsReached,
preferredencoding,
SameFileError,
std_headers,
write_string,
)
from .update import update_self
from .FileDownloader import (
FileDownloader,
)
from .extractor import gen_extractors
from .version import __version__
from .YoutubeDL import YoutubeDL
from .PostProcessor import (
FFmpegMetadataPP,
FFmpegVideoConvertor,
FFmpegExtractAudioPP,
FFmpegEmbedSubtitlePP,
)
def parseOpts(overrideArguments=None):
    """Build the optparse parser for all command-line options and parse them.

    If overrideArguments is given, it is parsed instead of the real command
    line; otherwise options are assembled from /etc/youtube-dl.conf, the
    user's config file and sys.argv (in that order, so later sources win).

    Returns a (parser, opts, args) tuple.
    """
    def _readOptions(filename_bytes):
        # Read one config file and split it into an argv-style token list.
        try:
            optionf = open(filename_bytes)
        except IOError:
            return []  # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                # shlex honours shell-style quoting and strips '#' comments.
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''
        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _comma_separated_values_options_callback(option, opt_str, value, parser):
        # optparse callback: store 'a,b,c' as the list ['a', 'b', 'c'].
        setattr(parser.values, option.dest, value.split(','))

    def _find_term_columns():
        # Terminal width from $COLUMNS or `stty size`; None if undeterminable.
        columns = os.environ.get('COLUMNS', None)
        if columns:
            return int(columns)

        try:
            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out,err = sp.communicate()
            return int(out.split()[1])
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # SystemExit; `except Exception:` would be safer here.
            pass
        return None

    def _hide_login_info(opts):
        # Mask the values of credential options before they are logged.
        opts = list(opts)
        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
            try:
                i = opts.index(private_opt)
                # NOTE(review): if the credential option is the last token,
                # opts[i+1] raises IndexError, which is not caught below.
                opts[i+1] = '<PRIVATE>'
            except ValueError:
                pass
        return opts

    max_width = 80
    max_help_position = 80

    # No need to wrap help messages if we're on a wide console
    columns = _find_term_columns()
    if columns: max_width = columns

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version'   : __version__,
        'formatter' : fmt,
        'usage' : '%prog [options] url [url...]',
        'conflict_handler' : 'resolve',
    }

    parser = optparse.OptionParser(**kw)

    # option groups
    general        = optparse.OptionGroup(parser, 'General Options')
    selection      = optparse.OptionGroup(parser, 'Video Selection')
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
    subtitles      = optparse.OptionGroup(parser, 'Subtitle Options')
    downloader     = optparse.OptionGroup(parser, 'Download Options')
    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

    general.add_option('-h', '--help',
            action='help', help='print this help text and exit')
    general.add_option('-v', '--version',
            action='version', help='print program version and exit')
    general.add_option('-U', '--update',
            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
    general.add_option('-i', '--ignore-errors',
            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
    general.add_option('--abort-on-error',
            action='store_false', dest='ignoreerrors',
            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
    general.add_option('--dump-user-agent',
            action='store_true', dest='dump_user_agent',
            help='display the current browser identification', default=False)
    general.add_option('--user-agent',
            dest='user_agent', help='specify a custom user agent', metavar='UA')
    general.add_option('--referer',
            dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
            metavar='REF', default=None)
    general.add_option('--list-extractors',
            action='store_true', dest='list_extractors',
            help='List all supported extractors and the URLs they would handle', default=False)
    general.add_option('--extractor-descriptions',
            action='store_true', dest='list_extractor_descriptions',
            help='Output descriptions of all supported extractors', default=False)
    general.add_option('--proxy', dest='proxy', default=None, help='Use the specified HTTP/HTTPS proxy', metavar='URL')
    general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
    general.add_option(
        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
        help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
    general.add_option(
        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
        help='Disable filesystem caching')

    selection.add_option('--playlist-start',
            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
    selection.add_option('--playlist-end',
            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option('--max-downloads', metavar='NUMBER',
            dest='max_downloads', type=int, default=None,
            help='Abort after downloading NUMBER files')
    selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
    selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
    selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
    selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
    selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
    selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
    selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
                         help='download only videos suitable for the given age',
                         default=None, type=int)
    selection.add_option('--download-archive', metavar='FILE',
                         dest='download_archive',
                         help='Download only videos not present in the archive file. Record all downloaded videos in it.')

    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
    authentication.add_option('--video-password',
            dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)')

    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', default='best',
            help='video format code, specifiy the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')

    subtitles.add_option('--write-sub', '--write-srt',
            action='store_true', dest='writesubtitles',
            help='write subtitle file', default=False)
    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
            action='store_true', dest='writeautomaticsub',
            help='write automatic subtitle file (youtube only)', default=False)
    subtitles.add_option('--all-subs',
            action='store_true', dest='allsubtitles',
            help='downloads all the available subtitles of the video', default=False)
    subtitles.add_option('--list-subs',
            action='store_true', dest='listsubtitles',
            help='lists all available subtitles for the video', default=False)
    subtitles.add_option('--sub-format',
            action='store', dest='subtitlesformat', metavar='FORMAT',
            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
            default=[], callback=_comma_separated_values_options_callback,
            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')

    downloader.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
    downloader.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    downloader.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
    downloader.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    # --test is an internal/hidden option used by the test suite.
    downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)

    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-id',
            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('-j', '--dump-json',
            action='store_true', dest='dumpjson',
            help='simulate, quiet but print JSON information', default=False)
    verbosity.add_option('--newline',
            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)
    verbosity.add_option('--dump-intermediate-pages',
            action='store_true', dest='dump_intermediate_pages', default=False,
            help='print downloaded pages to debug problems(very verbose)')
    verbosity.add_option('--write-pages',
            action='store_true', dest='write_pages', default=False,
            help='Write downloaded pages to files in the current directory')
    verbosity.add_option('--youtube-print-sig-code',
            action='store_true', dest='youtube_print_sig_code', default=False,
            help=optparse.SUPPRESS_HELP)

    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use only video ID in file name', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE',
            help=('output filename template. Use %(title)s to get the title, '
                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                  '%(autonumber)s to get an automatically incremented number, '
                  '%(ext)s for the filename extension, '
                  '%(format)s for the format description (like "22 - 1280x720" or "HD"),'
                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),'
                  '%(upload_date)s for the upload date (YYYYMMDD), '
                  '%(extractor)s for the provider (youtube, metacafe, etc), '
                  '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
                  'Use - to output to stdout. Can also be used to download to a different directory, '
                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option('--autonumber-size',
            dest='autonumber_size', metavar='NUMBER',
            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)
    filesystem.add_option('--write-annotations',
            action='store_true', dest='writeannotations',
            help='write video annotations to a .annotation file', default=False)
    filesystem.add_option('--write-thumbnail',
            action='store_true', dest='writethumbnail',
            help='write thumbnail image to disk', default=False)

    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
            help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
            help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
            help='add metadata to the files')

    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        # Look for the user config in $XDG_CONFIG_HOME (or ~/.config),
        # accepting either a youtube-dl/config directory layout or a
        # flat youtube-dl.conf file.
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
        if xdg_config_home:
            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
        else:
            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
        systemConf = _readOptions('/etc/youtube-dl.conf')
        userConf = _readOptions(userConfFile)
        commandLineConf = sys.argv[1:]
        # Command line overrides user config, which overrides system config.
        argv = systemConf + userConf + commandLineConf
        opts, args = parser.parse_args(argv)
        if opts.verbose:
            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')

    return parser, opts, args
def _real_main(argv=None):
    """Parse options, validate them, build a YoutubeDL and run the downloads.

    Exits the process via sys.exit()/parser.error() on validation failures,
    informational commands (--list-extractors, --dump-user-agent, ...) and
    on completion; it does not return normally.
    """
    # Compatibility fixes for Windows
    if sys.platform == 'win32':
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    parser, opts, args = parseOpts(argv)

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Set referer
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Dump user agent
    if opts.dump_user_agent:
        compat_print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            # Lines starting with '#', '/' or ';' are treated as comments.
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
            if opts.verbose:
                write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = [url.strip() for url in all_urls]

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
            compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            # Show each given URL under the first extractor that accepts it.
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                compat_print(u' ' + mu)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
            if not ie._WORKING:
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise')
                _COUNTS = (u'', u'5', u'10', u'all')
                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            compat_print(desc)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u' account username missing\n')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error(u'invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error(u'invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError):
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    try:
        opts.playliststart = int(opts.playliststart)
        if opts.playliststart <= 0:
            raise ValueError(u'Playlist start must be positive')
    except (TypeError, ValueError):
        parser.error(u'invalid playlist start number specified')
    try:
        opts.playlistend = int(opts.playlistend)
        # -1 means "until the end of the playlist".
        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
            raise ValueError(u'Playlist end must be greater than playlist start')
    except (TypeError, ValueError):
        parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg']:
            parser.error(u'invalid video recode format specified')
    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and (opts.writeautomaticsub == False):
        opts.writesubtitles = True

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    # Pick the output template: explicit -o wins, otherwise one is derived
    # from the --title/--id/--auto-number flags.
    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or u'%(title)s-%(id)s.%(ext)s')
    if '%(ext)s' not in outtmpl and opts.extractaudio:
        parser.error(u'Cannot download a video and extract audio into the same'
                     u' file! Use "%%(ext)s" instead of %r' %
                     determine_ext(outtmpl, u''))

    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'videopassword': opts.videopassword,
        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forcejson': opts.dumpjson,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'writethumbnail': opts.writethumbnail,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'daterange': date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': opts.download_archive,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
    }

    with YoutubeDL(ydl_opts) as ydl:
        ydl.print_debug_header()
        ydl.add_default_info_extractors()

        # PostProcessors
        # Add the metadata pp first, the other pps will copy it
        if opts.addmetadata:
            ydl.add_post_processor(FFmpegMetadataPP())
        if opts.extractaudio:
            ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
        if opts.recodevideo:
            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
        if opts.embedsubtitles:
            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))

        # Update version
        if opts.update_self:
            update_self(ydl.to_screen, opts.verbose)

        # Maybe do nothing
        if len(all_urls) < 1:
            if not opts.update_self:
                parser.error(u'you must provide at least one URL')
            else:
                sys.exit()

        try:
            retcode = ydl.download(all_urls)
        except MaxDownloadsReached:
            ydl.to_screen(u'--max-download limit reached, aborting.')
            retcode = 101

    sys.exit(retcode)
def main(argv=None):
    """Console entry point: run _real_main and translate the known
    terminating exceptions into process exit codes/messages."""
    try:
        _real_main(argv)
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except DownloadError:
        # The failure has already been reported by the downloader;
        # only signal it through the exit status.
        sys.exit(1)
-18
View File
@@ -1,18 +0,0 @@
#!/usr/bin/env python
# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl (2.7+)

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    # The package directory itself is on sys.path in that case; add its
    # parent so that 'import youtube_dl' below resolves to the package.
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(os.path.dirname(path)))

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()
-202
View File
@@ -1,202 +0,0 @@
__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_decrypt_text']
import base64
from math import ceil
from .utils import bytes_to_intlist, intlist_to_bytes
BLOCK_SIZE_BYTES = 16
def aes_ctr_decrypt(data, key, counter):
    """Decrypt *data* with AES in counter (CTR) mode.

    data: ciphertext as a list of byte values.
    key: 16/24/32-byte cipher key as a list of byte values.
    counter: object whose next_value() method yields successive
        16-byte counter blocks (lists of ints).
    Returns the plaintext as a list of byte values.
    """
    schedule = key_expansion(key)
    plain = []
    for offset in range(0, len(data), BLOCK_SIZE_BYTES):
        block = data[offset:offset + BLOCK_SIZE_BYTES]
        # Zero-pad a short final block up to a full 16 bytes.
        block = block + [0] * (BLOCK_SIZE_BYTES - len(block))
        keystream = aes_encrypt(counter.next_value(), schedule)
        plain += xor(block, keystream)
    # Drop any keystream bytes produced for the padding.
    return plain[:len(data)]
def key_expansion(data):
    """
    Generate key schedule

    @param {int[]} data  16/24/32-Byte cipher key
    @returns {int[]}     176/208/240-Byte expanded key
    """
    data = data[:]  # copy, so the caller's key list is not modified
    rcon_iteration = 1
    key_size_bytes = len(data)
    # 16/24/32-byte keys expand to 11/13/15 round keys of 16 bytes each.
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        # First word of each expansion step: rotate + S-box + round constant.
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

        # Three words derived by plain XOR with the word one key-length back.
        for _ in range(3):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

        # 256-bit keys insert an extra S-box substitution mid-step.
        if key_size_bytes == 32:
            temp = data[-4:]
            temp = sub_bytes(temp)
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])

        # Remaining plain-XOR words: 3 for 256-bit, 2 for 192-bit, 0 for 128-bit.
        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
    # The loop may overshoot; trim to the exact schedule length.
    data = data[:expanded_key_size_bytes]

    return data
def aes_encrypt(data, expanded_key):
    """Encrypt a single 16-byte block (list of ints) with AES.

    expanded_key is the 176/208/240-byte schedule from key_expansion().
    Returns the 16-byte ciphertext block as a list of ints.
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Initial whitening with the first round key.
    state = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for rnd in range(1, rounds + 1):
        state = shift_rows(sub_bytes(state))
        # The final round omits MixColumns.
        if rnd != rounds:
            state = mix_columns(state)
        round_key = expanded_key[rnd * BLOCK_SIZE_BYTES:(rnd + 1) * BLOCK_SIZE_BYTES]
        state = xor(state, round_key)

    return state
def aes_decrypt_text(data, password, key_size_bytes):
    """
    Decrypt text
    - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
    - Mode of operation is 'counter'

    @param {str} data                    Base64 encoded string
    @param {str,unicode} password        Password (will be encoded with utf-8)
    @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
    @returns {str}                       Decrypted data
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(base64.b64decode(data))
    password = bytes_to_intlist(password.encode('utf-8'))

    # Derive the key: zero-pad the password to key length, encrypt its first
    # 16 bytes with itself, then repeat that block to fill the key length.
    # NOTE(review): for 24-byte keys, 24 // 16 == 1, so the derived key is
    # only 16 bytes long — presumably adequate for the sites this targets;
    # verify before relying on the 192-bit path.
    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]

    class Counter:
        # 16-byte counter block: the 8-byte nonce followed by a counter
        # that starts at zero and is incremented once per block.
        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
        def next_value(self):
            temp = self.__value
            self.__value = inc(self.__value)
            return temp

    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
    plaintext = intlist_to_bytes(decrypted_data)

    return plaintext
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
MIX_COLUMN_MATRIX = ((2,3,1,1),
(1,2,3,1),
(1,1,2,3),
(3,1,1,2))
def sub_bytes(data):
    """Apply the AES S-box substitution to every byte of *data*."""
    return list(map(SBOX.__getitem__, data))
def rotate(data):
    """Return *data* rotated left by one position (first element moves to the end)."""
    rotated = list(data[1:])
    rotated.append(data[0])
    return rotated
def key_schedule_core(data, rcon_iteration):
    """One AES key-schedule core step: rotate the word, substitute each
    byte through the S-box, then XOR the round constant into byte 0."""
    word = sub_bytes(rotate(data))
    word[0] ^= RCON[rcon_iteration]
    return word
def xor(data1, data2):
    """Element-wise XOR of two byte lists, truncated to the shorter input."""
    result = []
    for left, right in zip(data1, data2):
        result.append(left ^ right)
    return result
def mix_column(data):
    """AES MixColumns on a single 4-byte column.

    Each output byte is the GF(2^8) dot product of a MIX_COLUMN_MATRIX row
    with the column, reducing modulo the AES polynomial (0x11b).
    """
    def _gf_double(value):
        # Multiply by x in GF(2^8); reduce with 0x1b when the high bit overflows.
        doubled = value << 1
        if doubled > 0xff:
            doubled = (doubled & 0xff) ^ 0x1b
        return doubled

    mixed_column = []
    for coefficients in MIX_COLUMN_MATRIX:
        accumulator = 0
        for coefficient, byte in zip(coefficients, data):
            if coefficient == 2:
                term = _gf_double(byte)
            elif coefficient == 3:
                term = _gf_double(byte) ^ byte
            else:  # coefficient == 1
                term = byte
            accumulator ^= term & 0xff
        mixed_column.append(accumulator)
    return mixed_column
def mix_columns(data):
    """Apply mix_column() to each of the four 4-byte columns of the state."""
    return [byte
            for offset in range(0, 16, 4)
            for byte in mix_column(data[offset:offset + 4])]
def shift_rows(data):
    """AES ShiftRows on a 16-byte column-major state: row r is rotated
    left by r positions."""
    return [data[((column + row) % 4) * 4 + row]
            for column in range(4)
            for row in range(4)]
def inc(data):
    """Return a copy of *data* incremented by one as a big-endian counter.

    The input list is not modified; an all-0xff counter wraps to all zeros.
    """
    incremented = list(data)
    position = len(incremented) - 1
    while position >= 0:
        if incremented[position] != 255:
            incremented[position] += 1
            break
        incremented[position] = 0
        position -= 1
    return incremented
-211
View File
@@ -1,211 +0,0 @@
from .appletrailers import AppleTrailersIE
from .addanime import AddAnimeIE
from .anitube import AnitubeIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVFutureIE,
)
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .c56 import C56IE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cnn import CNNIE
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .criterion import CriterionIE
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .depositfiles import DepositFilesIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .defense import DefenseGouvFrIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .eitb import EitbIE
from .escapist import EscapistIE
from .exfm import ExfmIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
France2IE,
GenerationQuoiIE
)
from .freesound import FreesoundIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hark import HarkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .hypem import HypemIE
from .ign import IGNIE, OneUPIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE
from .internetvideoarchive import InternetVideoArchiveIE
from .jeuxvideo import JeuxVideoIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .liveleak import LiveLeakIE
from .livestream import LivestreamIE, LivestreamOriginalIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mit import TechTVMITIE, MITIE
from .mixcloud import MixcloudIE
from .mofosex import MofosexIE
from .mtv import MTVIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import NBCNewsIE
from .newgrounds import NewgroundsIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .nowvideo import NowVideoIE
from .ooyala import OoyalaIE
from .orf import ORFIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtlnow import RTLnowIE
from .rutube import RutubeIE
from .sina import SinaIE
from .slashdot import SlashdotIE
from .slideshare import SlideshareIE
from .sohu import SohuIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .sztvhu import SztvHuIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tf1 import TF1IE
from .thisav import ThisAVIE
from .toutv import TouTvIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvp import TvpIE
from .unistra import UnistraIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vevo import VevoIE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videopremium import VideoPremiumIE
from .vimeo import VimeoIE, VimeoChannelIE
from .vine import VineIE
from .viki import VikiIE
from .vk import VKIE
from .wat import WatIE
from .websurg import WeBSurgIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .worldstarhiphop import WorldStarHipHopIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeIE
from .yahoo import YahooIE, YahooSearchIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubePlaylistIE,
YoutubeSearchIE,
YoutubeSearchDateIE,
YoutubeUserIE,
YoutubeChannelIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeRecommendedIE,
YoutubeTruncatedURLIE,
YoutubeWatchLaterIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
)
from .zdf import ZDFIE
# Collect every extractor class imported above (names ending in 'IE').
# GenericIE is excluded from the comprehension and appended last so the
# generic fallback is only tried after all site-specific extractors.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """Return one instance of every supported extractor.

    Order matters: the first extractor whose pattern matches handles the URL.
    """
    instances = []
    for extractor_class in _ALL_CLASSES:
        instances.append(extractor_class())
    return instances
def get_info_extractor(ie_name):
    """Return the info extractor class named ``<ie_name>IE``."""
    class_name = ie_name + 'IE'
    return globals()[class_name]
-86
View File
@@ -1,86 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
ExtractorError,
)
class AddAnimeIE(InfoExtractor):
    """Extractor for add-anime.net watch pages.

    The site may answer with an HTTP 503 anti-bot challenge page; in that
    case the arithmetic "challenge-form" task is solved and confirmed
    before re-requesting the real page.
    """
    _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
    IE_NAME = u'AddAnime'
    _TEST = {
        u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
        u'file': u'24MR3YO5SAS9.mp4',
        u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
        u'info_dict': {
            u"description": u"One Piece 606",
            u"title": u"One Piece 606"
        }
    }

    def _real_extract(self, url):
        try:
            mobj = re.match(self._VALID_URL, url)
            video_id = mobj.group('video_id')
            webpage = self._download_webpage(url, video_id)
        except ExtractorError as ee:
            # Anything other than the 503 challenge is a real error.
            if not isinstance(ee.cause, compat_HTTPError) or \
               ee.cause.code != 503:
                raise

            # Parse the challenge page: form action, hidden token, and the
            # arithmetic expression "a.value = X+Y*Z;" that must be solved.
            redir_webpage = ee.cause.read().decode('utf-8')
            action = self._search_regex(
                r'<form id="challenge-form" action="([^"]+)"',
                redir_webpage, u'Redirect form')
            vc = self._search_regex(
                r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
                redir_webpage, u'redirect vc value')
            av = re.search(
                r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                redir_webpage)
            if av is None:
                raise ExtractorError(u'Cannot find redirect math task')
            av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))

            # The expected answer also adds the length of the host name.
            parsed_url = compat_urllib_parse_urlparse(url)
            av_val = av_res + len(parsed_url.netloc)
            confirm_url = (
                parsed_url.scheme + u'://' + parsed_url.netloc +
                action + '?' +
                compat_urllib_parse.urlencode({
                    'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
            self._download_webpage(
                confirm_url, video_id,
                note=u'Confirming after redirect')
            # After confirming, the original page should be served normally.
            webpage = self._download_webpage(url, video_id)

        formats = []
        # Two inline JS variables hold the stream URLs: normal and hq.
        for format_id in ('normal', 'hq'):
            rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
            # NOTE(review): 'URLx' looks like a typo in this regex field name
            # (it only appears in log/error text).
            video_url = self._search_regex(rex, webpage, u'video file URLx',
                                           fatal=False)
            if not video_url:
                continue
            formats.append({
                'format_id': format_id,
                'url': video_url,
            })
        if not formats:
            raise ExtractorError(u'Cannot find any video format!')
        video_title = self._og_search_title(webpage)
        video_description = self._og_search_description(webpage)

        return {
            '_type': 'video',
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'description': video_description
        }
-55
View File
@@ -1,55 +0,0 @@
import re
import xml.etree.ElementTree
from .common import InfoExtractor
class AnitubeIE(InfoExtractor):
    """Extractor for anitube.se video pages.

    The page embeds a key which is exchanged for a player configuration
    XML containing the stream URLs.
    """
    IE_NAME = u'anitube.se'
    _VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'

    _TEST = {
        u'url': u'http://www.anitube.se/video/36621',
        u'md5': u'59d0eeae28ea0bc8c05e7af429998d43',
        u'file': u'36621.mp4',
        u'info_dict': {
            u'id': u'36621',
            u'ext': u'mp4',
            u'title': u'Recorder to Randoseru 01',
        },
        u'skip': u'Blocked in the US',
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        # The embed key identifies the video in the econfig.php endpoint.
        key = self._html_search_regex(r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)',
                                      webpage, u'key')

        webpage_config = self._download_webpage('http://www.anitube.se/nuevo/econfig.php?key=%s' % key,
                                                key)
        config_xml = xml.etree.ElementTree.fromstring(webpage_config.encode('utf-8'))

        video_title = config_xml.find('title').text

        formats = []
        # <file> holds the SD stream URL, <filehd> the HD one; either may
        # be absent from the configuration.
        video_url = config_xml.find('file')
        if video_url is not None:
            formats.append({
                'format_id': 'sd',
                'url': video_url.text,
            })
        video_url = config_xml.find('filehd')
        if video_url is not None:
            formats.append({
                'format_id': 'hd',
                'url': video_url.text,
            })

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats
        }
-138
View File
@@ -1,138 +0,0 @@
import re
import xml.etree.ElementTree
import json
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
determine_ext,
)
class AppleTrailersIE(InfoExtractor):
    """Extractor for trailers.apple.com movie pages.

    Downloads the iTunes playlist snippet for a movie, cleans it up so it
    can be parsed as XML, and returns one playlist entry per trailer.
    """
    _VALID_URL = r'https?://(?:www\.)?trailers.apple.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
    _TEST = {
        u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/",
        u"playlist": [
            {
                u"file": u"manofsteel-trailer4.mov",
                u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8",
                u"info_dict": {
                    u"duration": 111,
                    u"title": u"Trailer 4",
                    u"upload_date": u"20130523",
                    u"uploader_id": u"wb",
                },
            },
            {
                u"file": u"manofsteel-trailer3.mov",
                u"md5": u"b8017b7131b721fb4e8d6f49e1df908c",
                u"info_dict": {
                    u"duration": 182,
                    u"title": u"Trailer 3",
                    u"upload_date": u"20130417",
                    u"uploader_id": u"wb",
                },
            },
            {
                u"file": u"manofsteel-trailer.mov",
                u"md5": u"d0f1e1150989b9924679b441f3404d48",
                u"info_dict": {
                    u"duration": 148,
                    u"title": u"Trailer",
                    u"upload_date": u"20121212",
                    u"uploader_id": u"wb",
                },
            },
            {
                u"file": u"manofsteel-teaser.mov",
                u"md5": u"5fe08795b943eb2e757fa95cb6def1cb",
                u"info_dict": {
                    u"duration": 93,
                    u"title": u"Teaser",
                    u"upload_date": u"20120721",
                    u"uploader_id": u"wb",
                },
            }
        ]
    }

    # Matches the JSON argument of iTunes.playURL(...) in onClick handlers.
    _JSON_RE = r'iTunes.playURL\((.*?)\);'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        movie = mobj.group('movie')
        uploader_id = mobj.group('company')

        playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc')
        playlist_snippet = self._download_webpage(playlist_url, movie)
        # Strip scripts and self-close <img> tags so the snippet parses as XML.
        playlist_cleaned = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', playlist_snippet)
        playlist_cleaned = re.sub(r'<img ([^<]*?)>', r'<img \1/>', playlist_cleaned)
        # The ' in the onClick attributes are not escaped, it couldn't be parsed
        # with xml.etree.ElementTree.fromstring
        # like: http://trailers.apple.com/trailers/wb/gravity/
        def _clean_json(m):
            return u'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
        playlist_cleaned = re.sub(self._JSON_RE, _clean_json, playlist_cleaned)
        playlist_html = u'<html>' + playlist_cleaned + u'</html>'

        doc = xml.etree.ElementTree.fromstring(playlist_html)
        playlist = []
        for li in doc.findall('./div/ul/li'):
            on_click = li.find('.//a').attrib['onClick']
            trailer_info_json = self._search_regex(self._JSON_RE,
                on_click, u'trailer info')
            trailer_info = json.loads(trailer_info_json)
            title = trailer_info['title']
            video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
            thumbnail = li.find('.//img').attrib['src']
            upload_date = trailer_info['posted'].replace('-', '')

            # Runtime is formatted as "M:SS"; convert to seconds.
            runtime = trailer_info['runtime']
            m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
            duration = None
            if m:
                duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))

            first_url = trailer_info['url']
            trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
            settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
            settings_json = self._download_webpage(settings_json_url, trailer_id, u'Downloading settings json')
            settings = json.loads(settings_json)

            formats = []
            for format in settings['metadata']['sizes']:
                # The src is a file pointing to the real video file
                format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
                formats.append({
                    'url': format_url,
                    'ext': determine_ext(format_url),
                    'format': format['type'],
                    'width': format['width'],
                    'height': int(format['height']),
                })
            formats = sorted(formats, key=lambda f: (f['height'], f['width']))

            info = {
                '_type': 'video',
                'id': video_id,
                'title': title,
                'formats': formats,
                # NOTE(review): 'title' is duplicated in this literal; the
                # second occurrence (same value) wins.
                'title': title,
                'duration': duration,
                'thumbnail': thumbnail,
                'upload_date': upload_date,
                'uploader_id': uploader_id,
                'user_agent': 'QuickTime compatible (youtube-dl)',
            }
            # TODO: Remove when #980 has been merged
            info['url'] = formats[-1]['url']
            info['ext'] = formats[-1]['ext']
            playlist.append(info)

        return {
            '_type': 'playlist',
            'id': movie,
            'entries': playlist,
        }
-68
View File
@@ -1,68 +0,0 @@
import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
unified_strdate,
)
class ArchiveOrgIE(InfoExtractor):
    """Extractor for archive.org item detail pages.

    Fetches the item's JSON metadata (by appending output=json to the URL)
    and builds a format list from the files marked as video.
    """
    IE_NAME = 'archive.org'
    IE_DESC = 'archive.org videos'
    _VALID_URL = r'(?:https?://)?(?:www\.)?archive.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
    _TEST = {
        u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
        u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
        u'md5': u'8af1d4cf447933ed3c7f4871162602db',
        u'info_dict': {
            u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
            u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
            u"upload_date": u"19681210",
            u"uploader": u"SRI International"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # The same page serves JSON metadata when output=json is appended.
        json_url = url + (u'?' if u'?' in url else '&') + u'output=json'
        json_data = self._download_webpage(json_url, video_id)
        data = json.loads(json_data)

        # Metadata fields are lists; take the first element of each.
        title = data['metadata']['title'][0]
        description = data['metadata']['description'][0]
        uploader = data['metadata']['creator'][0]
        upload_date = unified_strdate(data['metadata']['date'][0])

        # Keep only files whose format string mentions 'Video', sorted by size
        # so the largest (assumed best) format comes last.
        formats = [{
                'format': fdata['format'],
                'url': 'http://' + data['server'] + data['dir'] + fn,
                'file_size': int(fdata['size']),
            }
            for fn,fdata in data['files'].items()
            if 'Video' in fdata['format']]
        formats.sort(key=lambda fdata: fdata['file_size'])
        for f in formats:
            f['ext'] = determine_ext(f['url'])

        info = {
            '_type': 'video',
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
            'upload_date': upload_date,
        }
        thumbnail = data.get('misc', {}).get('image')
        if thumbnail:
            info['thumbnail'] = thumbnail

        # TODO: Remove when #980 has been merged
        info.update(formats[-1])

        return info
-54
View File
@@ -1,54 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class ARDIE(InfoExtractor):
    """Extractor for the ARD Mediathek (ardmediathek.de / daserste.de).

    Scrapes mediaCollection.addMediaStream(...) calls from the page and
    picks the highest-quality stream of the default media type.
    """
    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
    _TEST = {
        u'url': u'http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640',
        u'file': u'14077640.mp4',
        u'md5': u'6ca8824255460c787376353f9e20bbd8',
        u'info_dict': {
            u"title": u"11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
        },
        u'skip': u'Requires rtmpdump'
    }

    def _real_extract(self, url):
        # determine video id from url
        m = re.match(self._VALID_URL, url)

        # Prefer the numeric documentId query parameter when present.
        numid = re.search(r'documentId=([0-9]+)', url)
        if numid:
            video_id = numid.group(1)
        else:
            video_id = m.group('video_id')

        # determine title and media streams from webpage
        html = self._download_webpage(url, video_id)
        title = re.search(self._TITLE, html).group('title')
        streams = [mo.groupdict() for mo in re.finditer(self._MEDIA_STREAM, html)]
        if not streams:
            # No streams plus an "fsk" marker means an age-restricted video.
            assert '"fsk"' in html
            raise ExtractorError(u'This video is only available after 8:00 pm')

        # choose default media type and highest quality for now
        stream = max([s for s in streams if int(s["media_type"]) == 0],
                     key=lambda s: int(s["quality"]))

        # there's two possibilities: RTMP stream or HTTP download
        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
        if stream['rtmp_url']:
            self.to_screen(u'RTMP download detected')
            assert stream['video_url'].startswith('mp4:')
            info["url"] = stream["rtmp_url"]
            info["play_path"] = stream['video_url']
        else:
            assert stream["video_url"].endswith('.mp4')
            info["url"] = stream["video_url"]
        # NOTE(review): returns a single-element list — presumably an older
        # extractor return convention; confirm against the base class.
        return [info]
-262
View File
@@ -1,262 +0,0 @@
# encoding: utf-8
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
find_xpath_attr,
unified_strdate,
determine_ext,
get_element_by_id,
compat_str,
)
# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
class ArteTvIE(InfoExtractor):
    """Extractor for videos.arte.tv and liveweb.arte.tv pages.

    suitable() accepts either URL family; _real_extract dispatches to the
    matching extraction method. Live streams (index-NN.html) are rejected.
    """
    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
    _LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path',   u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url',    u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        # Dispatch on which URL family matched.
        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, id, lang)

        mobj = re.match(self._LIVEWEB_URL, url)
        if mobj is not None:
            name = mobj.group('name')
            lang = mobj.group('lang')
            return self._extract_liveweb(url, name, lang)

        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        # Two-step metadata lookup: a reference XML points to a per-language
        # configuration XML that contains the stream URLs.
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }

    def _extract_liveweb(self, url, name, lang):
        """Extract form http://liveweb.arte.tv/"""
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
                                            video_id, u'Downloading information')
        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
        event_doc = config_doc.find('event')
        # Prefer the HD URL; fall back to SD when it is missing.
        url_node = event_doc.find('video').find('urlHd')
        if url_node is None:
            url_node = event_doc.find('urlSd')

        return {'id': video_id,
                'title': event_doc.find('name%s' % lang.capitalize()).text,
                'url': url_node.text.replace('MP4', 'mp4'),
                'ext': 'flv',
                'thumbnail': self._og_search_thumbnail(webpage),
                }
class ArteTVPlus7IE(InfoExtractor):
    """Extractor for www.arte.tv/guide (arte+7) pages.

    Reads the arte_vp_url JSON endpoint embedded in the page, filters the
    advertised formats by the URL's language, and sorts them by quality.
    Also serves as the base class for the Creative and Future variants.
    """
    IE_NAME = u'arte.tv:+7'
    _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'

    @classmethod
    def _extract_url_info(cls, url):
        mobj = re.match(cls._VALID_URL, url)
        lang = mobj.group('lang')
        # This is not a real id, it can be for example AJT for the news
        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
        video_id = mobj.group('id')
        return video_id, lang

    def _real_extract(self, url):
        video_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, video_id)
        return self._extract_from_webpage(webpage, video_id, lang)

    def _extract_from_webpage(self, webpage, video_id, lang):
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {
            'id': player_info['VID'],
            'title': player_info['VTI'],
            'description': player_info.get('VDE'),
            'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
        }

        all_formats = player_info['VSR'].values()
        # Some formats use the m3u8 protocol
        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))

        def _match_lang(f):
            if f.get('versionCode') is None:
                return True
            # Return true if that format is in the language of the url
            # NOTE(review): `l` is only assigned for 'fr'/'de'; the _VALID_URL
            # patterns restrict lang to those two values, so it is always bound.
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, all_formats)
        formats = list(formats)  # in python3 filter returns an iterator
        if not formats:
            # Some videos are only available in the 'Originalversion'
            # they aren't tagged as being in French or German
            if all(f['versionCode'] == 'VO' for f in all_formats):
                formats = all_formats
            else:
                raise ExtractorError(u'The formats list is empty')

        # Two quality schemes exist: named grades (HQ/MQ/EQ/SQ) or
        # numeric height/bitrate values.
        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
            def sort_key(f):
                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
        else:
            def sort_key(f):
                return (
                    # Sort first by quality
                    int(f.get('height',-1)),
                    int(f.get('bitrate',-1)),
                    # The original version with subtitles has lower relevance
                    re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
                    # The version with sourds/mal subtitles has also lower relevance
                    re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
                )
        formats = sorted(formats, key=sort_key)

        def _format(format_info):
            # Build a youtube-dl format dict from one VSR entry.
            quality = ''
            height = format_info.get('height')
            if height is not None:
                quality = compat_str(height)
            bitrate = format_info.get('bitrate')
            if bitrate is not None:
                quality += '-%d' % bitrate
            if format_info.get('versionCode') is not None:
                format_id = u'%s-%s' % (quality, format_info['versionCode'])
            else:
                format_id = quality
            info = {
                'format_id': format_id,
                'format_note': format_info.get('versionLibelle'),
                'width': format_info.get('width'),
                'height': height,
            }
            if format_info['mediaType'] == u'rtmp':
                info['url'] = format_info['streamer']
                info['play_path'] = 'mp4:' + format_info['url']
                info['ext'] = 'flv'
            else:
                info['url'] = format_info['url']
                info['ext'] = determine_ext(info['url'])
            return info
        info_dict['formats'] = [_format(f) for f in formats]

        return info_dict
# It also uses the arte_vp_url url from the webpage to extract the information
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
    """creative.arte.tv variant; only the URL pattern differs from arte+7."""
    IE_NAME = u'arte.tv:creative'
    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'

    _TEST = {
        u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
        u'file': u'050489-002.mp4',
        u'info_dict': {
            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
        },
    }
class ArteTVFutureIE(ArteTVPlus7IE):
    """future.arte.tv variant: a page hosts several articles, so extraction
    is restricted to the element whose id matches the URL anchor."""
    IE_NAME = u'arte.tv:future'
    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'

    _TEST = {
        u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
        u'file': u'050940-003.mp4',
        u'info_dict': {
            u'title': u'Les champignons au secours de la planète',
        },
    }

    def _real_extract(self, url):
        anchor_id, lang = self._extract_url_info(url)
        webpage = self._download_webpage(url, anchor_id)
        # Only pass the anchored article's markup to the base-class parser.
        row = get_element_by_id(anchor_id, webpage)
        return self._extract_from_webpage(row, anchor_id, lang)
-49
View File
@@ -1,49 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
determine_ext,
ExtractorError,
)
class AUEngineIE(InfoExtractor):
    """Extractor for auengine.com embed pages.

    Collects all file/url links from the page JS; a .png link is treated
    as the thumbnail and a '/videos/' link as the video URL.
    """
    _TEST = {
        u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
        u'file': u'lfvlytY6.mp4',
        u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f',
        u'info_dict': {
            u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]"
        }
    }
    _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed.php\?.*?file=([^&]+).*?'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
                webpage, u'title')
        title = title.strip()
        links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
        links = map(compat_urllib_parse.unquote, links)

        thumbnail = None
        video_url = None
        for link in links:
            if link.endswith('.png'):
                thumbnail = link
            elif '/videos/' in link:
                video_url = link
        if not video_url:
            raise ExtractorError(u'Could not find video URL')
        # Drop the file extension from the title if it was appended there.
        ext = u'.' + determine_ext(video_url)
        if ext == title[-len(ext):]:
            title = title[:-len(ext)]

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
        }
-86
View File
@@ -1,86 +0,0 @@
import re
import json
import itertools
from .common import InfoExtractor
from ..utils import (
compat_urllib_request,
)
class BambuserIE(InfoExtractor):
    """Extractor for single bambuser.com broadcasts, via the getVideo.json API."""
    IE_NAME = u'bambuser'
    _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
    # Public player API key used for the getVideo.json request.
    _API_KEY = '005f64509e19a868399060af746a00aa'

    _TEST = {
        u'url': u'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
        u'info_dict': {
            u'id': u'4050584',
            u'ext': u'flv',
            u'title': u'Education engineering days - lightning talks',
            u'duration': 3741,
            u'uploader': u'pixelversity',
            u'uploader_id': u'344706',
        },
        u'params': {
            # It doesn't respect the 'Range' header, it would download the whole video
            # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
        info_json = self._download_webpage(info_url, video_id)
        info = json.loads(info_json)['result']

        return {
            'id': video_id,
            'title': info['title'],
            'url': info['url'],
            'thumbnail': info.get('preview'),
            # The API returns numeric fields as strings; coerce them.
            'duration': int(info['length']),
            'view_count': int(info['views_total']),
            'uploader': info['username'],
            'uploader_id': info['uid'],
        }
class BambuserChannelIE(InfoExtractor):
    """Playlist extractor for a bambuser.com channel.

    Pages through the channel's broadcasts via the xhr-api, _STEP entries
    at a time, until an empty page is returned.
    """
    IE_NAME = u'bambuser:channel'
    _VALID_URL = r'http://bambuser.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
    # The maximum number we can get with each request
    _STEP = 50

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        urls = []
        last_id = ''
        for i in itertools.count(1):
            # vid_older_than paginates backwards from the last seen video id.
            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                '&method=broadcast&format=json&vid_older_than={last}'
                ).format(user=user, count=self._STEP, last=last_id)
            req = compat_urllib_request.Request(req_url)
            # Without setting this header, we wouldn't get any result
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            info_json = self._download_webpage(req, user,
                u'Downloading page %d' % i)
            results = json.loads(info_json)['result']
            if len(results) == 0:
                break
            last_id = results[-1]['vid']
            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

        return {
            '_type': 'playlist',
            'title': user,
            'entries': urls,
        }
-129
View File
@@ -1,129 +0,0 @@
import json
import re
from .common import InfoExtractor
from ..utils import (
compat_str,
compat_urlparse,
ExtractorError,
)
class BandcampIE(InfoExtractor):
    """Extract a single track from a bandcamp.com track page.

    Two paths exist: pages exposing a free-download link are resolved through
    Bandcamp's statdownload endpoint; otherwise the embedded ``trackinfo``
    JSON is parsed for direct stream URLs.
    """
    IE_NAME = u'Bandcamp'
    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
    _TESTS = [{
        u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        u'file': u'1812978515.mp3',
        u'md5': u'cdeb30cdae1921719a3cbcab696ef53c',
        u'info_dict': {
            u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"
        },
        u'skip': u'There is a limit of 200 free downloads / month for the test song'
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        # We get the link to the free download page
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if m_download is None:
            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
            if m_trackinfo:
                json_code = m_trackinfo.group(1)
                data = json.loads(json_code)
                # NOTE: returns on the first entry; trackinfo on a track page
                # is expected to contain a single track.
                for d in data:
                    formats = [{
                        # BUGFIX: previously the literal string 'format_id'
                        # was stored here instead of the actual identifier
                        # (e.g. 'mp3-128'), breaking format selection.
                        'format_id': format_id,
                        'url': format_url,
                        'ext': format_id.partition('-')[0]
                    } for format_id, format_url in sorted(d['file'].items())]
                    return {
                        'id': compat_str(d['id']),
                        'title': d['title'],
                        'formats': formats,
                    }
            else:
                raise ExtractorError(u'No free songs found')
        download_link = m_download.group(1)
        # 'id' renamed to 'video_id' so the builtin is not shadowed.
        video_id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
                             webpage, re.MULTILINE | re.DOTALL).group('id')
        download_webpage = self._download_webpage(download_link, video_id,
                                                  'Downloading free downloads page')
        # We get the dictionary of the track from some javascrip code
        info = re.search(r'items: (.*?),$',
                         download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info[u'downloads'][u'mp3-320']
        # If we try to use this url it says the link has expired
        initial_url = mp3_info[u'url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        # We build the url we will use to get the final track url
        # This url is build in Bandcamp in the script download_bunde_*.js
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
        final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        # in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
        track_info = {
            'id': video_id,
            'title': info[u'title'],
            'ext': 'mp3',
            'url': final_url,
            'thumbnail': info[u'thumb_url'],
            'uploader': info[u'artist'],
        }
        return [track_info]
class BandcampAlbumIE(InfoExtractor):
    """Extract all tracks of a bandcamp.com album page as a playlist."""
    IE_NAME = u'Bandcamp:album'
    _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'
    _TEST = {
        u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        u'playlist': [
            {
                u'file': u'1353101989.mp3',
                u'md5': u'39bc1eded3476e927c724321ddf116cf',
                u'info_dict': {
                    u'title': u'Intro',
                }
            },
            {
                u'file': u'38097443.mp3',
                u'md5': u'1a2c32e2691474643e912cc6cd4bffaa',
                u'info_dict': {
                    u'title': u'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        u'params': {
            u'playlistend': 2
        },
        u'skip': u'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
    }

    def _real_extract(self, url):
        slug = re.match(self._VALID_URL, url).group('title')
        webpage = self._download_webpage(url, slug)
        track_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
        if not track_paths:
            raise ExtractorError(u'The page doesn\'t contain any track')
        # Each track link is relative; resolve it against the album URL and
        # delegate to the single-track extractor.
        track_entries = []
        for path in track_paths:
            track_url = compat_urlparse.urljoin(url, path)
            track_entries.append(self.url_result(track_url, ie=BandcampIE.ie_key()))
        album_title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title')
        return {
            '_type': 'playlist',
            'title': album_title,
            'entries': track_entries,
        }
-193
View File
@@ -1,193 +0,0 @@
import datetime
import json
import os
import re
import socket
from .common import InfoExtractor
from ..utils import (
compat_http_client,
compat_parse_qs,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urllib_request,
ExtractorError,
unescapeHTML,
)
class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv

    Handles three URL shapes: regular item pages (queried via the JSON API),
    /play/ redirector URLs (followed once, then re-extracted), and direct
    video responses (detected by Content-Type).
    """
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    # Regex to pull the filename extension off a media URL.
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        # See https://github.com/rg3/youtube-dl/issues/857
        # api.swf fragment URLs are rewritten to /play/ URLs first.
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            # /play/ URLs redirect; the real file id sits in the fragment of
            # the redirect target.  Recurse once with the canonical URL.
            request = compat_urllib_request.Request(url)
            response = compat_urllib_request.urlopen(request)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)
        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        # Ask the page for its JSON representation.
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        # The API only answers to this User-Agent.
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
            # urlh is intentionally kept for the "regular URL" branch below,
            # where its body is read as JSON.
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
                basename = url.split('/')[-1]
                title,ext = os.path.splitext(basename)
                title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None: # Regular URL
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
            try:
                json_data = json.loads(json_code)
                # Responses may be wrapped in a top-level 'Post' object.
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data
                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                if 'additionalMedia' in data:
                    # Pick the variant with the greatest pixel height.
                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
                    best_format = formats[-1]
                    video_url = best_format['url']
                else:
                    video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)
                info = {
                    'id': compat_str(data['item_id']),
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError,KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
        return [info]
class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users.

    Pages through a user's episode list via blip.tv's mobile AJAX endpoint
    and returns the collected videos as a playlist.
    """
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        username = mobj.group(1)
        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        page_base = page_base % mobj.group(1)
        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.
        video_ids = []
        pagenum = 1
        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)
            # Extract video identifiers
            ids_in_page = []
            for mobj in re.finditer(r'href="/([^"]+)"', page):
                # BUGFIX: deduplicate on the unescaped id. Previously the raw
                # (possibly HTML-escaped) match was tested against a list of
                # unescaped ids, so ids containing entities could be added
                # more than once.
                video_id = unescapeHTML(mobj.group(1))
                if video_id not in ids_in_page:
                    ids_in_page.append(video_id)
            video_ids.extend(ids_in_page)
            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.
            if len(ids_in_page) < self._PAGE_SIZE:
                break
            pagenum += 1
        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]
-27
View File
@@ -1,27 +0,0 @@
import re
from .common import InfoExtractor
class BloombergIE(InfoExtractor):
    """Bloomberg video pages embed an Ooyala player; delegate to Ooyala."""
    _VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?).html'
    _TEST = {
        u'url': u'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
        u'file': u'12bzhqZTqQHmmlA8I-i0NpzJgcG5NNYX.mp4',
        u'info_dict': {
            u'title': u'Shah\'s Presentation on Foreign-Exchange Strategies',
            u'description': u'md5:abc86e5236f9f0e4866c59ad36736686',
        },
        u'params': {
            # Requires ffmpeg (m3u8 manifest)
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        name = re.match(self._VALID_URL, url).group('name')
        webpage = self._download_webpage(url, name)
        # The og:video property carries the Ooyala embed URL.
        return self.url_result(self._og_search_video_url(webpage), ie='Ooyala')
-38
View File
@@ -1,38 +0,0 @@
import re
import json
from .common import InfoExtractor
from ..utils import determine_ext
class BreakIE(InfoExtractor):
    """Extractor for break.com video pages."""
    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
    _TEST = {
        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
        u'file': u'2468056.mp4',
        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
        u'info_dict': {
            u"title": u"When Girls Act Like D-Bags"
        }
    }

    def _real_extract(self, url):
        slug = re.match(self._VALID_URL, url).group(1)
        # The numeric id is the last dash-separated component of the slug.
        video_id = slug.split("-")[-1]
        embed_url = 'http://www.break.com/embed/%s' % video_id
        webpage = self._download_webpage(embed_url, video_id)
        info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
                                       u'info json', flags=re.DOTALL)
        info = json.loads(info_json)
        video_url = info['videoUri']
        m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
        if m_youtube is not None:
            # Some break.com pages are just wrappers around a YouTube video.
            return self.url_result(m_youtube.group(1), 'Youtube')
        final_url = '%s?%s' % (video_url, info['AuthToken'])
        return [{
            'id': video_id,
            'url': final_url,
            'ext': determine_ext(final_url),
            'title': info['contentName'],
            'thumbnail': info['thumbUri'],
        }]
-177
View File
@@ -1,177 +0,0 @@
# encoding: utf-8
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
find_xpath_attr,
compat_urlparse,
compat_str,
compat_urllib_request,
ExtractorError,
)
class BrightcoveIE(InfoExtractor):
    """Extractor for Brightcove federated player URLs.

    Supports single videos (identified by an '@videoPlayer' query field) and
    whole playlists (identified by 'playerKey').  Class helpers also build a
    Brightcove URL out of an embedded <object> tag for use by other
    extractors.
    """
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
    _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s'
    _TESTS = [
        {
            # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
            u'file': u'2371591881001.mp4',
            u'md5': u'8eccab865181d29ec2958f32a6a754f5',
            u'note': u'Test Brightcove downloads and detection in GenericIE',
            u'info_dict': {
                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
                u'uploader': u'8TV',
                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
            }
        },
        {
            # From http://medianetwork.oracle.com/video/player/1785452137001
            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
            u'file': u'1785452137001.flv',
            u'info_dict': {
                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
                u'uploader': u'Oracle',
            },
        },
        {
            # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
            u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
            u'info_dict': {
                u'id': u'2750934548001',
                u'ext': u'mp4',
                u'title': u'This Bracelet Acts as a Personal Thermostat',
                u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0',
                u'uploader': u'Mashable',
            },
        },
    ]

    @classmethod
    def _build_brighcove_url(cls, object_str):
        """
        Build a Brightcove url from a xml string containing
        <object class="BrightcoveExperience">{params}</object>
        """
        # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
        # (self-close <param> tags so the string parses as XML)
        object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                            lambda m: m.group(1) + '/>', object_str)
        # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
        object_str = object_str.replace(u'<--', u'<!--')
        object_doc = xml.etree.ElementTree.fromstring(object_str)
        assert u'BrightcoveExperience' in object_doc.attrib['class']
        params = {'flashID': object_doc.attrib['id'],
                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
                  }
        def find_param(name):
            # Returns the <param name=...> value, or None when absent.
            node = find_xpath_attr(object_doc, './param', 'name', name)
            if node is not None:
                return node.attrib['value']
            return None
        playerKey = find_param('playerKey')
        # Not all pages define this value
        if playerKey is not None:
            params['playerKey'] = playerKey
        # The three fields hold the id of the video
        videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
        if videoPlayer is not None:
            params['@videoPlayer'] = videoPlayer
        linkBase = find_param('linkBaseURL')
        if linkBase is not None:
            params['linkBaseURL'] = linkBase
        data = compat_urllib_parse.urlencode(params)
        return cls._FEDERATED_URL_TEMPLATE % data

    @classmethod
    def _extract_brightcove_url(cls, webpage):
        """Try to extract the brightcove url from the wepbage, returns None
        if it can't be found
        """
        m_brightcove = re.search(
            r'<object[^>]+?class=([\'"])[^>]*?BrightcoveExperience.*?\1.+?</object>',
            webpage, re.DOTALL)
        if m_brightcove is not None:
            return cls._build_brighcove_url(m_brightcove.group())
        else:
            return None

    def _real_extract(self, url):
        # Change the 'videoId' and others field to '@videoPlayer'
        url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
        # Change bckey (used by bcove.me urls) to playerKey
        url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
        mobj = re.match(self._VALID_URL, url)
        query_str = mobj.group('query')
        query = compat_urlparse.parse_qs(query_str)
        videoPlayer = query.get('@videoPlayer')
        if videoPlayer:
            # Single-video URL.
            return self._get_video_info(videoPlayer[0], query_str, query)
        else:
            # Playlist URL; 'playerKey' must be present.
            player_key = query['playerKey']
            return self._get_playlist_info(player_key[0])

    def _get_video_info(self, video_id, query_str, query):
        """Fetch the federated player page and parse its experience JSON."""
        request_url = self._FEDERATED_URL_TEMPLATE % query_str
        req = compat_urllib_request.Request(request_url)
        linkBase = query.get('linkBaseURL')
        if linkBase is not None:
            # Some players refuse to answer without the original referer.
            req.add_header('Referer', linkBase[0])
        webpage = self._download_webpage(req, video_id)
        self.report_extraction(video_id)
        info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
        info = json.loads(info)['data']
        video_info = info['programmedContent']['videoPlayer']['mediaDTO']
        return self._extract_video_info(video_info)

    def _get_playlist_info(self, player_key):
        """Fetch playlist metadata for a playerKey and extract each video."""
        playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
                                               player_key, u'Downloading playlist information')
        json_data = json.loads(playlist_info)
        if 'videoList' not in json_data:
            raise ExtractorError(u'Empty playlist')
        playlist_info = json_data['videoList']
        videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
        return self.playlist_result(videos, playlist_id=playlist_info['id'],
                                    playlist_title=playlist_info['mediaCollectionDTO']['displayName'])

    def _extract_video_info(self, video_info):
        """Turn a Brightcove mediaDTO dict into a youtube-dl info dict.

        Prefers the 'renditions' list (sorted by size ascending into formats);
        falls back to the single FLVFullLengthURL.
        """
        info = {
            'id': compat_str(video_info['id']),
            'title': video_info['displayName'],
            'description': video_info.get('shortDescription'),
            'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
            'uploader': video_info.get('publisherName'),
        }
        renditions = video_info.get('renditions')
        if renditions:
            renditions = sorted(renditions, key=lambda r: r['size'])
            info['formats'] = [{
                'url': rend['defaultURL'],
                'height': rend.get('frameHeight'),
                'width': rend.get('frameWidth'),
            } for rend in renditions]
        elif video_info.get('FLVFullLengthURL') is not None:
            info.update({
                'url': video_info['FLVFullLengthURL'],
            })
        else:
            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
        return info
-36
View File
@@ -1,36 +0,0 @@
# coding: utf-8
import re
import json
from .common import InfoExtractor
from ..utils import determine_ext
class C56IE(InfoExtractor):
    """Extractor for 56.com video pages."""
    _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
    IE_NAME = u'56.com'
    _TEST ={
        u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html',
        u'file': u'93440716.flv',
        u'md5': u'e59995ac63d0457783ea05f93f12a866',
        u'info_dict': {
            u'title': u'网事知多少 第32期:车怒',
        },
    }

    def _real_extract(self, url):
        text_id = re.match(self._VALID_URL, url, flags=re.VERBOSE).group('textid')
        info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
                                           text_id, u'Downloading video info')
        info = json.loads(info_page)['info']
        # Formats come with their file size; the biggest file is assumed to
        # be the best quality (sorted() is stable, so ties keep API order).
        by_size = sorted(info['rfiles'], key=lambda f: int(f['filesize']))
        video_url = by_size[-1]['url']
        return {
            'id': info['vid'],
            'title': info['Subject'],
            'url': video_url,
            'ext': determine_ext(video_url),
            'thumbnail': info.get('bimg') or info.get('img'),
        }
-37
View File
@@ -1,37 +0,0 @@
# coding: utf-8
import re
from .common import InfoExtractor
class Canalc2IE(InfoExtractor):
    """Extractor for canalc2.tv (University of Strasbourg VOD)."""
    IE_NAME = 'canalc2.tv'
    _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
    _TEST = {
        u'url': u'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
        u'file': u'12163.mp4',
        u'md5': u'060158428b650f896c542dfbb3d6487f',
        u'info_dict': {
            u'title': u'Terrasses du Numérique'
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        # We need to set the voir field for getting the file name
        page_url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
        webpage = self._download_webpage(page_url, video_id)
        file_name = self._search_regex(
            r"so\.addVariable\('file','(.*?)'\);",
            webpage, 'file name')
        title = self._html_search_regex(
            r'class="evenement8">(.*?)</a>', webpage, u'title')
        return {
            'id': video_id,
            'ext': 'mp4',
            'url': 'http://vod-flash.u-strasbg.fr:8080/' + file_name,
            'title': title,
        }
-55
View File
@@ -1,55 +0,0 @@
# encoding: utf-8
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import unified_strdate
class CanalplusIE(InfoExtractor):
    """Extractor for canalplus.fr pages and the player.canalplus.fr player.

    The video id is either taken from the player URL or scraped from the
    page; metadata then comes from the getVideosLiees XML service.
    """
    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
    IE_NAME = u'canalplus.fr'
    _TEST = {
        u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        u'file': u'922470.flv',
        u'info_dict': {
            u'title': u'Zapping - 26/08/13',
            u'description': u'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            u'upload_date': u'20130826',
        },
        u'params': {
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')
        if video_id is None:
            # Page URL: the numeric id must be scraped from the page itself.
            webpage = self._download_webpage(url, mobj.group('path'))
            video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
        info_url = self._VIDEO_INFO_TEMPLATE % video_id
        info_page = self._download_webpage(info_url, video_id,
                                           u'Downloading video info')
        self.report_extraction(video_id)
        doc = xml.etree.ElementTree.fromstring(info_page.encode('utf-8'))
        # The service returns several related videos; pick the requested one.
        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        infos = video_info.find('INFOS')
        media = video_info.find('MEDIA')
        # 'fmt_name'/'fmt_node' instead of 'format' so the builtin is not
        # shadowed.  Qualities are listed worst-to-best; take the best one
        # that is actually present in the XML.
        fmt_nodes = [media.find('VIDEOS/%s' % fmt_name)
                     for fmt_name in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
        video_url = [fmt_node.text for fmt_node in fmt_nodes if fmt_node is not None][-1]
        return {
            'id': video_id,
            'title': u'%s - %s' % (infos.find('TITRAGE/TITRE').text,
                                   infos.find('TITRAGE/SOUS_TITRE').text),
            'url': video_url,
            'ext': 'flv',
            'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
            'thumbnail': media.find('IMAGES/GRAND').text,
            'description': infos.find('DESCRIPTION').text,
            'view_count': int(infos.find('NB_VUES').text),
        }
-84
View File
@@ -1,84 +0,0 @@
# encoding: utf-8
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class CinemassacreIE(InfoExtractor):
    """Extractor for cinemassacre.com posts embedding a Screenwave player."""
    _VALID_URL = r'(?:http://)?(?:www\.)?(?P<url>cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?)(?:[/?].*)?'
    _TESTS = [{
        u'url': u'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
        u'file': u'19911.flv',
        u'md5': u'f9bb7ede54d1229c9846e197b4737e06',
        u'info_dict': {
            u'upload_date': u'20121110',
            u'title': u'“Angry Video Game Nerd: The Movie” Trailer',
            u'description': u'md5:fb87405fcb42a331742a0dce2708560b',
        }
    },
    {
        u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
        u'file': u'521be8ef82b16.flv',
        u'md5': u'9509ee44dcaa7c1068604817c19a9e50',
        u'info_dict': {
            u'upload_date': u'20131002',
            u'title': u'The Mummys Hand (1940)',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        webpage_url = u'http://' + mobj.group('url')
        webpage = self._download_webpage(webpage_url, None) # Don't know video id yet
        # Upload date comes from the URL path (YYYY/MM/DD).
        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
        if not mobj:
            raise ExtractorError(u'Can\'t extract embed url and video id')
        playerdata_url = mobj.group(u'embed_url')
        video_id = mobj.group(u'video_id')
        video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|',
                                              webpage, u'title')
        video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>',
                                                    webpage, u'description', flags=re.DOTALL, fatal=False)
        # BUGFIX: with fatal=False the search may return None, on which the
        # previous len() check raised TypeError.  Treat both None and an
        # empty match as "no description".
        if not video_description:
            video_description = None
        playerdata = self._download_webpage(playerdata_url, video_id)
        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url')
        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file')
        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file')
        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)
        formats = [
            {
                'url': url,
                'play_path': 'mp4:' + sd_file,
                'rtmp_live': True, # workaround
                'ext': 'flv',
                'format': 'sd',
                'format_id': 'sd',
            },
            {
                'url': url,
                'play_path': 'mp4:' + hd_file,
                'rtmp_live': True, # workaround
                'ext': 'flv',
                'format': 'hd',
                'format_id': 'hd',
            },
        ]
        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'description': video_description,
            'upload_date': video_date,
            'thumbnail': video_thumbnail,
        }
-53
View File
@@ -1,53 +0,0 @@
import re
import time
import xml.etree.ElementTree
from .common import InfoExtractor
class ClipfishIE(InfoExtractor):
    """Extractor for clipfish.de; metadata comes from a devxml endpoint."""
    IE_NAME = u'clipfish'
    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
    _TEST = {
        u'url': u'http://www.clipfish.de/special/supertalent/video/4028320/supertalent-2013-ivana-opacak-singt-nobodys-perfect/',
        u'file': u'4028320.f4v',
        u'md5': u'5e38bda8c329fbfb42be0386a3f5a382',
        u'info_dict': {
            u'title': u'Supertalent 2013: Ivana Opacak singt Nobody\'s Perfect',
            u'duration': 399,
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group(1)
        # The timestamp query parameter is required by the endpoint.
        info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                    (video_id, int(time.time())))
        info_xml = self._download_webpage(
            info_url, video_id, note=u'Downloading info page')
        doc = xml.etree.ElementTree.fromstring(info_xml)
        # Duration is formatted as HH:MM:SS:ms; the ms part is ignored.
        duration = None
        duration_match = re.match(
            r'^(?P<hours>[0-9]+):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2}):(?P<ms>[0-9]*)$',
            doc.find('duration').text)
        if duration_match:
            duration = (
                int(duration_match.group('hours')) * 60 * 60 +
                int(duration_match.group('minutes')) * 60 +
                int(duration_match.group('seconds'))
            )
        return {
            'id': video_id,
            'title': doc.find('title').text,
            'url': doc.find('filename').text,
            'thumbnail': doc.find('imageurl').text,
            'duration': duration,
        }
-58
View File
@@ -1,58 +0,0 @@
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import determine_ext
class CNNIE(InfoExtractor):
    """Extractor for cnn.com video pages.

    Metadata comes from CNN's per-video XML manifest; the highest-resolution
    file listed there is downloaded.
    """
    _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
    _TESTS = [{
        u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
        u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
        u'md5': u'3e6121ea48df7e2259fe73a0628605c4',
        u'info_dict': {
            u'title': u'Nadal wins 8th French Open title',
            u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
        },
    },
    {
        u"url": u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
        u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
        u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e",
        u"info_dict": {
            u"title": "Student's epic speech stuns new freshmen",
            u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\""
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        path = mobj.group('path')
        page_title = mobj.group('title')
        info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path
        info_xml = self._download_webpage(info_url, page_title)
        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8'))
        formats = []
        for f in info.findall('files/file'):
            # 'bitrate' looks like "WIDTHxHEIGHT" or "WIDTHxHEIGHT_RATEk";
            # collect (width, height, kbps, path) tuples.
            mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?',f.attrib['bitrate'])
            if mf is not None:
                formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text))
        # Lexicographic tuple sort: the last entry has the largest
        # width/height/bitrate, i.e. the best quality.
        formats = sorted(formats)
        (_,_,_, video_path) = formats[-1]
        video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path
        # Thumbnails sorted by (height, width); the last one is the largest.
        thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
        thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
        return {'id': info.attrib['id'],
                'title': info.find('headline').text,
                'url': video_url,
                'ext': determine_ext(video_url),
                'thumbnail': thumbnails[-1][1],
                'thumbnails': thumbs_dict,
                'description': info.find('description').text,
                }
-82
View File
@@ -1,82 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse_urlparse,
determine_ext,
ExtractorError,
)
class CollegeHumorIE(InfoExtractor):
    """Extractor for collegehumor.com video pages.

    Metadata comes from the moogaloop XML endpoint; videos hosted on YouTube
    are delegated to the Youtube extractor, and f4m-manifest entries are
    rewritten into direct mp4 URLs.
    """
    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
    _TESTS = [{
        u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
        u'file': u'6902724.mp4',
        u'md5': u'1264c12ad95dca142a9f0bf7968105a0',
        u'info_dict': {
            u'title': u'Comic-Con Cosplay Catastrophe',
            u'description': u'Fans get creative this year at San Diego.  Too creative.  And yes, that\'s really Joss Whedon.',
        },
    },
    {
        u'url': u'http://www.collegehumor.com/video/3505939/font-conference',
        u'file': u'3505939.mp4',
        u'md5': u'c51ca16b82bb456a4397987791a835f5',
        u'info_dict': {
            u'title': u'Font Conference',
            u'description': u'This video wasn\'t long enough, so we made it double-spaced.',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('videoid')
        info = {
            'id': video_id,
            'uploader': None,
            'upload_date': None,
        }
        self.report_extraction(video_id)
        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
        mdoc = self._download_xml(xmlUrl, video_id,
                                  u'Downloading info XML',
                                  u'Unable to download video info XML')
        try:
            videoNode = mdoc.findall('./video')[0]
            # Delegate to the Youtube extractor when a youtubeID is present.
            youtubeIdNode = videoNode.find('./youtubeID')
            if youtubeIdNode is not None:
                return self.url_result(youtubeIdNode.text, 'Youtube')
            info['description'] = videoNode.findall('./description')[0].text
            info['title'] = videoNode.findall('./caption')[0].text
            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
            next_url = videoNode.findall('./file')[0].text
        except IndexError:
            raise ExtractorError(u'Invalid metadata XML file')
        if next_url.endswith(u'manifest.f4m'):
            manifest_url = next_url + '?hdcore=2.10.3'
            adoc = self._download_xml(manifest_url, video_id,
                                      u'Downloading XML manifest',
                                      u'Unable to download video info XML')
            try:
                video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
            except IndexError:
                raise ExtractorError(u'Invalid manifest file')
            # Rebuild a direct mp4 URL on the thumbnail host from the
            # manifest id, dropping the '.csmil' packaging suffix.
            url_pr = compat_urllib_parse_urlparse(info['thumbnail'])
            info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','')
            info['ext'] = 'mp4'
        else:
            # Old-style direct links
            info['url'] = next_url
            info['ext'] = determine_ext(info['url'])
        return info
-218
View File
@@ -1,218 +0,0 @@
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from .mtv import MTVIE, _media_xml_tag
from ..utils import (
compat_str,
compat_urllib_parse,
ExtractorError,
unified_strdate,
)
class ComedyCentralIE(MTVIE):
    """Extractor for comedycentral.com clips, built on the MTV feed logic."""
    _VALID_URL = r'http://www.comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)'
    _FEED_URL = u'http://comedycentral.com/feeds/mrss/'
    _TEST = {
        u'url': u'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        u'md5': u'4167875aae411f903b751a21f357f1ee',
        u'info_dict': {
            u'id': u'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            u'ext': u'mp4',
            u'title': u'Uncensored - Greg Fitzsimmons - Too Good of a Mother',
            u'description': u'After a certain point, breastfeeding becomes c**kblocking.',
        },
    }
    # Overwrite MTVIE properties we don't want
    _TESTS = []

    def _get_thumbnail_url(self, uri, itemdoc):
        thumb_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
        thumb_node = itemdoc.find(thumb_path)
        return thumb_node.attrib['url']

    def _real_extract(self, url):
        show_title = re.match(self._VALID_URL, url).group('title')
        page = self._download_webpage(url, show_title)
        # The mgid identifies the clip in MTV's feed system.
        mgid = self._search_regex(r'data-mgid="(?P<mgid>mgid:.*?)"',
                                  page, u'mgid')
        return self._get_videos_info(mgid)
class ComedyCentralShowsIE(InfoExtractor):
    """Extractor for full episodes and clips of The Daily Show / Colbert Report."""
    IE_DESC = u'The Daily Show / Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                     |(https?://)?(www\.)?
                         (?P<showname>thedailyshow|colbertnation)\.com/
                        (full-episodes/(?P<episode>.*)|
                          (?P<clip>
                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))|
                          (?P<interview>
                              extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?)))
                     $"""
    _TEST = {
        u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
        u'file': u'422212.mp4',
        u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d',
        u'info_dict': {
            u"upload_date": u"20121214",
            u"description": u"Kristen Stewart",
            u"uploader": u"thedailyshow",
            u"title": u"thedailyshow-kristen-stewart part 1"
        }
    }

    # Bitrates (kbit/s) the configuration feed may offer, best to worst.
    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    # Container extension per bitrate.
    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    # (width, height) per bitrate.
    _video_dimensions = {
        '3500': (1280, 720),
        '2200': (960, 540),
        '1700': (768, 432),
        '1200': (640, 360),
        '750': (512, 288),
        '400': (384, 216),
    }

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # _VALID_URL is written with re.VERBOSE, so the base-class suitable()
        # (which compiles the pattern without flags) cannot be reused.
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    @staticmethod
    def _transform_rtmp_url(rtmp_video_url):
        # Map the RTMP stream URL onto the equivalent plain-HTTP mirror on the
        # mtvnmobile CDN so no RTMP downloader is required.
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
        if not m:
            raise ExtractorError(u'Cannot transform RTMP url')
        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
        return base + m.group('finalid')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        if mobj.group('shortname'):
            # ":tds"-style shortcuts point at the newest full episode.
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = u'http://www.thedailyshow.com/full-episodes/'
            else:
                url = u'http://www.colbertnation.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            assert mobj is not None

        if mobj.group('clip'):
            if mobj.group('showname') == 'thedailyshow':
                epTitle = mobj.group('tdstitle')
            else:
                epTitle = mobj.group('cntitle')
            dlNewest = False
        elif mobj.group('interview'):
            epTitle = mobj.group('interview_title')
            dlNewest = False
        else:
            # A bare full-episodes URL redirects to the newest episode.
            dlNewest = not mobj.group('episode')
            if dlNewest:
                epTitle = mobj.group('showname')
            else:
                epTitle = mobj.group('episode')

        self.report_extraction(epTitle)
        webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            # Follow the redirect and re-parse the final, specific URL.
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            if mobj is None:
                raise ExtractorError(u'Invalid redirected URL: ' + url)
            if mobj.group('episode') == '':
                raise ExtractorError(u'Redirected URL is still not specific: ' + url)
            epTitle = mobj.group('episode')

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)

        if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a without
            # a URL prefix; so extract the alternate reference
            # and then add the URL prefix manually.
            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
            if len(altMovieParams) == 0:
                raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        uri = mMovieParams[0][1]
        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
        indexXml = self._download_webpage(indexUrl, epTitle,
                                          u'Downloading show index',
                                          u'unable to download episode index')

        results = []

        idoc = xml.etree.ElementTree.fromstring(indexXml)
        itemEls = idoc.findall('.//item')
        # An episode is split into several acts; emit one info dict per act.
        for partNum,itemEl in enumerate(itemEls):
            mediaId = itemEl.findall('./guid')[0].text
            shortMediaId = mediaId.split(':')[-1]
            showId = mediaId.split(':')[-2].replace('.com', '')
            officialTitle = itemEl.findall('./title')[0].text
            officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)

            # Each act has its own configuration XML listing the renditions.
            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                         compat_urllib_parse.urlencode({'uri': mediaId}))
            configXml = self._download_webpage(configUrl, epTitle,
                                               u'Downloading configuration for %s' % shortMediaId)

            cdoc = xml.etree.ElementTree.fromstring(configXml)
            turls = []
            for rendition in cdoc.findall('.//rendition'):
                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
                turls.append(finfo)

            if len(turls) == 0:
                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
                continue

            formats = []
            for format, rtmp_video_url in turls:
                w, h = self._video_dimensions.get(format, (None, None))
                formats.append({
                    'url': self._transform_rtmp_url(rtmp_video_url),
                    'ext': self._video_extensions.get(format, 'mp4'),
                    'format_id': format,
                    'height': h,
                    'width': w,
                })

            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
            info = {
                'id': shortMediaId,
                'formats': formats,
                'uploader': showId,
                'upload_date': officialDate,
                'title': effTitle,
                'thumbnail': None,
                'description': compat_str(officialTitle),
            }

            # TODO: Remove when #980 has been merged
            info.update(info['formats'][-1])

            results.append(info)

        return results
-440
View File
@@ -1,440 +0,0 @@
import base64
import os
import re
import socket
import sys
import netrc
import xml.etree.ElementTree
from ..utils import (
compat_http_client,
compat_urllib_error,
compat_str,
clean_html,
compiled_regex_type,
ExtractorError,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the FileDownloader. The FileDownloader processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The dictionaries must include the following fields:
id: Video identifier.
url: Final video URL.
title: Video title, unescaped.
ext: Video filename extension.
Instead of url and ext, formats can also specified.
The following fields are optional:
format: The video format, defaults to ext (used for --get-format)
thumbnails: A list of dictionaries (with the entries "resolution" and
"url") for the varying thumbnails
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
uploader: Full name of the video uploader.
upload_date: Video upload date (YYYYMMDD).
uploader_id: Nickname or id of the video uploader.
location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
view_count: How many users have watched the video on the platform.
urlhandle: [internal] The urlHandle to be used to download the file,
like returned by urllib.request.urlopen
age_limit: Age restriction for the video, as an integer (years)
formats: A list of dictionaries for each format available, it must
be ordered from worst to best quality. Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19")
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* vbr Average video bitrate in KBit/s
* vcodec Name of the video codec in use
* filesize The number of bytes, if known in advance
webpage_url: The url to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
Unless mentioned otherwise, the fields should be Unicode strings.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_real_extract() must return a *list* of information dictionaries as
described above.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
self.initialize()
return self._real_extract(url)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
self.to_screen(u'%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is None:
errnote = u'Unable to download webpage'
raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err)
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
else:
encoding = 'utf-8'
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen(u'Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
raw_filename = ('%s_%s.dump' % (video_id, url))
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen(u'Saving request to ' + filename)
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
content = webpage_bytes.decode(encoding, 'replace')
return (content, urlh)
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
""" Returns the data of the page as a string """
return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
def _download_xml(self, url_or_request, video_id, note=u'Downloading XML', errnote=u'Unable to downloand XML'):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(url_or_request, video_id, note, errnote)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen(u'%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen(u'%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen(u'Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen(u'Logging in')
#Methods for following #608
def url_result(self, url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
#TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info
def playlist_result(self, entries, playlist_id=None, playlist_title=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
return video_info
def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj: break
if sys.stderr.isatty() and os.name != 'nt':
_name = u'\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
elif default is not None:
return default
elif fatal:
raise RegexNotFoundError(u'Unable to extract %s' % _name)
else:
self._downloader.report_warning(u'unable to extract %s; '
u'please report this issue on http://yt-dl.org/bug' % _name)
return None
def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
return (username, password)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')'
property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video')
if secure: regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _html_search_meta(self, name, html, display_name=None):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?ix)<meta(?=[^>]+(?:name|property)=["\']%s["\'])
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
html, display_name, fatal=False)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # Accept an empty prefix, a positive integer, or the literal "all".
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        m = re.match(self._make_valid_url(), query)
        if m is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        prefix = m.group('prefix')
        term = m.group('query')
        if prefix == '':
            # No prefix: fetch only the single best match.
            return self._get_n_results(term, 1)
        if prefix == 'all':
            return self._get_n_results(term, self._MAX_RESULTS)
        n = int(prefix)
        if n <= 0:
            raise ExtractorError(u'invalid download number %s for query "%s"' % (n, term))
        if n > self._MAX_RESULTS:
            # Clamp to the provider's limit, but tell the user about it.
            self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
            n = self._MAX_RESULTS
        return self._get_n_results(term, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
-106
View File
@@ -1,106 +0,0 @@
# coding: utf-8
import re
import json
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
orderedSet,
compat_urllib_parse_urlparse,
compat_urlparse,
)
class CondeNastIE(InfoExtractor):
    """
    Condé Nast is a media group, some of its sites use a custom HTML5 player
    that works the same in all of them.
    """

    # The keys are the supported sites and the values are the name to be shown
    # to the user and in the extractor description.
    _SITES = {'wired': u'WIRED',
              'gq': u'GQ',
              'vogue': u'Vogue',
              'glamour': u'Glamour',
              'wmagazine': u'W Magazine',
              'vanityfair': u'Vanity Fair',
              }

    _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
    IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

    _TEST = {
        u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
        u'file': u'5171b343c2b4c00dd0c1ccb3.mp4',
        u'md5': u'1921f713ed48aabd715691f774c451f7',
        u'info_dict': {
            u'title': u'3D Printed Speakers Lit With LED',
            u'description': u'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
        }
    }

    def _extract_series(self, url, webpage):
        """Return a playlist of all /watch/ links found on a series page."""
        title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
                                        webpage, u'series title', flags=re.DOTALL)
        url_object = compat_urllib_parse_urlparse(url)
        base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
        m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
                              webpage, flags=re.DOTALL)
        # orderedSet keeps first occurrence order while removing duplicates.
        paths = orderedSet(m.group(1) for m in m_paths)
        build_url = lambda path: compat_urlparse.urljoin(base_url, path)
        entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
        return self.playlist_result(entries, playlist_title=title)

    def _extract_video(self, webpage):
        """Extract a single video via the cnevids player-loader endpoint."""
        description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
                                               r'<div class="video-post-content">(.+?)</div>',
                                               ],
                                              webpage, u'description',
                                              fatal=False, flags=re.DOTALL)
        # The player parameters are embedded in an inline JS object.
        params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
                                    u'player params', flags=re.DOTALL)
        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id')
        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id')
        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target')
        data = compat_urllib_parse.urlencode({'videoId': video_id,
                                              'playerId': player_id,
                                              'target': target,
                                              })
        base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
                                           webpage, u'base info url',
                                           default='http://player.cnevids.com/player/loader.js?')
        info_url = base_info_url + data
        info_page = self._download_webpage(info_url, video_id,
                                           u'Downloading video info')
        video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info')
        video_info = json.loads(video_info)

        # Prefer high quality, and mp4 over other containers at equal quality.
        def _formats_sort_key(f):
            type_ord = 1 if f['type'] == 'video/mp4' else 0
            quality_ord = 1 if f['quality'] == 'high' else 0
            return (quality_ord, type_ord)
        best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1]

        return {'id': video_id,
                'url': best_format['src'],
                'ext': best_format['type'].split('/')[-1],
                'title': video_info['title'],
                'thumbnail': video_info['poster_frame'],
                'description': description,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site')
        url_type = mobj.group('type')
        id = mobj.group('id')

        self.to_screen(u'Extracting from %s with the Condé Nast extractor' % self._SITES[site])
        webpage = self._download_webpage(url, id)

        if url_type == 'series':
            return self._extract_series(url, webpage)
        else:
            return self._extract_video(webpage)
-40
View File
@@ -1,40 +0,0 @@
# -*- coding: utf-8 -*-
import re
from .common import InfoExtractor
from ..utils import determine_ext
class CriterionIE(InfoExtractor):
    """Extractor for film pages on criterion.com."""
    _VALID_URL = r'https?://www\.criterion\.com/films/(\d*)-.+'
    _TEST = {
        u'url': u'http://www.criterion.com/films/184-le-samourai',
        u'file': u'184.mp4',
        u'md5': u'bc51beba55685509883a9a7830919ec3',
        u'info_dict': {
            u"title": u"Le Samouraï",
            u"description" : u'md5:a2b4b116326558149bef81f76dcbb93f',
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group(1)
        webpage = self._download_webpage(url, video_id)

        # The page sets the stream and thumbnail via SWFObject addVariable calls.
        final_url = self._search_regex(
            r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
        thumbnail = self._search_regex(
            r'so.addVariable\("thumbnailURL", "(.+?)"\)\;', webpage, 'thumbnail url')
        title = self._html_search_regex(
            r'<meta content="(.+?)" property="og:title" />', webpage, 'video title')
        description = self._html_search_regex(
            r'<meta name="description" content="(.+?)" />', webpage, 'video description')

        return {
            'id': video_id,
            'url': final_url,
            'ext': determine_ext(final_url),
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
-51
View File
@@ -1,51 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
)
class CSpanIE(InfoExtractor):
    # Extractor for C-SPAN program pages (RTMP streams).
    _VALID_URL = r'http://www.c-spanvideo.org/program/(.*)'
    _TEST = {
        u'url': u'http://www.c-spanvideo.org/program/HolderonV',
        u'file': u'315139.flv',
        u'md5': u'74a623266956f69e4df0068ab6c80fe4',
        u'info_dict': {
            u"title": u"Attorney General Eric Holder on Voting Rights Act Decision"
        },
        u'skip': u'Requires rtmpdump'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        prog_name = mobj.group(1)
        webpage = self._download_webpage(url, prog_name)
        video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')

        # The flash player XML carries the title and the RTMP parameters.
        data = compat_urllib_parse.urlencode({'programid': video_id,
                                              'dynamic':'1'})
        info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data
        video_info = self._download_webpage(info_url, video_id, u'Downloading video info')

        self.report_extraction(video_id)

        title = self._html_search_regex(r'<string name="title">(.*?)</string>',
                                        video_info, 'title')
        description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"',
                                              webpage, 'description',
                                              flags=re.MULTILINE|re.DOTALL)

        url = self._search_regex(r'<string name="URL">(.*?)</string>',
                                 video_info, 'video url')
        # The XML uses placeholders for the protocol and port; fill them in.
        url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443')
        path = self._search_regex(r'<string name="path">(.*?)</string>',
                                  video_info, 'rtmp play path')

        return {'id': video_id,
                'title': title,
                'ext': 'flv',
                'url': url,
                'play_path': path,
                'description': description,
                'thumbnail': self._og_search_thumbnail(webpage),
                }
-22
View File
@@ -1,22 +0,0 @@
# encoding: utf-8
from .canalplus import CanalplusIE
class D8IE(CanalplusIE):
    # d8.tv shares the Canal+ video service; only the URL pattern and the
    # info-endpoint template differ from the parent extractor.
    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
    IE_NAME = u'd8.tv'

    _TEST = {
        u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        u'file': u'966289.flv',
        u'info_dict': {
            u'title': u'Campagne intime - Documentaire exceptionnel',
            u'description': u'md5:d2643b799fb190846ae09c61e59a859f',
            u'upload_date': u'20131108',
        },
        u'params': {
            # rtmp
            u'skip_download': True,
        },
    }
-228
View File
@@ -1,228 +0,0 @@
import re
import json
import itertools
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
compat_urllib_request,
compat_str,
get_element_by_attribute,
get_element_by_id,
orderedSet,
ExtractorError,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
        request = compat_urllib_request.Request(url)
        # Request.add_header stores headers in a dict keyed by the capitalized
        # header name, so calling add_header('Cookie', ...) twice keeps only
        # the last value and 'family_filter=off' would be lost.  Send both
        # cookies in a single header instead.
        request.add_header('Cookie', 'family_filter=off; ff=off')
        return request
class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
    IE_NAME = u'dailymotion'

    # (info key, format_id) pairs, ordered from worst to best quality.
    _FORMATS = [
        (u'stream_h264_ld_url', u'ld'),
        (u'stream_h264_url', u'standard'),
        (u'stream_h264_hq_url', u'hq'),
        (u'stream_h264_hd_url', u'hd'),
        (u'stream_h264_hd1080_url', u'hd180'),
    ]

    _TESTS = [
        {
            u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
            u'file': u'x33vw9.mp4',
            u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
            u'info_dict': {
                u"uploader": u"Amphora Alex and Van .",
                u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
            }
        },
        # Vevo video
        {
            u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
            u'file': u'USUV71301934.mp4',
            u'info_dict': {
                u'title': u'Roar (Official)',
                u'uploader': u'Katy Perry',
                u'upload_date': u'20130905',
            },
            u'params': {
                u'skip_download': True,
            },
            u'skip': u'VEVO is only available in some countries',
        },
        # age-restricted video
        {
            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
            u'file': u'xyh2zz.mp4',
            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
            u'info_dict': {
                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
                u'uploader': 'HotWaves1012',
                u'age_limit': 18,
            }
        }
    ]

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen(u'Vevo video detected: %s' % vevo_id)
            return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')

        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
                                             # Looking for official user
                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
                                            webpage, 'video uploader', fatal=False)

        age_limit = self._rta_search(webpage)

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            # Page shows DD-MM-YYYY; reassemble as YYYYMMDD.
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        # The embed page exposes the per-quality stream URLs as a JS object.
        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
            'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)

        formats = []
        for (key, format_id) in self._FORMATS:
            video_url = info.get(key)
            if video_url is not None:
                # The URL encodes the resolution, e.g. "...H264-640x360...".
                m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
                if m_size is not None:
                    width, height = m_size.group(1), m_size.group(2)
                else:
                    width, height = None, None
                formats.append({
                    'url': video_url,
                    'ext': 'mp4',
                    'format_id': format_id,
                    'width': width,
                    'height': height,
                })
        if not formats:
            raise ExtractorError(u'Unable to extract video URL')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, webpage)
            return

        return {
            'id': video_id,
            'formats': formats,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'title': self._og_search_title(webpage),
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
        }

    def _get_available_subtitles(self, video_id, webpage):
        """Return {language: subtitle url} from the Dailymotion API, or {}."""
        try:
            sub_list = self._download_webpage(
                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
            return {}
        info = json.loads(sub_list)
        if (info['total'] > 0):
            sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
            return sub_lang_list
        self._downloader.report_warning(u'video doesn\'t have subtitles')
        return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    IE_NAME = u'dailymotion:playlist'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'

    def _extract_entries(self, id):
        """Walk all playlist pages and return url_result entries for each video."""
        video_ids = []
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, u'Downloading page %s' % pagenum)

            playlist_el = get_element_by_attribute(u'class', u'row video_list', webpage)
            video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))

            # Stop once the "next" pagination link disappears.
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        # orderedSet removes duplicates while preserving first-seen order.
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(url, playlist_id)

        return {'_type': 'playlist',
                'id': playlist_id,
                'title': get_element_by_id(u'playlist_name', webpage),
                'entries': self._extract_entries(playlist_id),
                }
class DailymotionUserIE(DailymotionPlaylistIE):
    # User pages paginate the same way as playlists, so the parent's
    # _extract_entries is reused with user-specific URL templates.
    IE_NAME = u'dailymotion:user'
    _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
    _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)
        # Display name as shown on the profile page.
        full_user = self._html_search_regex(
            r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
            webpage, u'user', flags=re.DOTALL)

        return {
            '_type': 'playlist',
            'id': user,
            'title': full_user,
            'entries': self._extract_entries(user),
        }

Some files were not shown because too many files have changed in this diff Show More