Hello community,
here is the log from the commit of package youtube-dl for openSUSE:Factory checked in at 2019-04-17 10:10:33
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/youtube-dl (Old)
and /work/SRC/openSUSE:Factory/.youtube-dl.new.17052 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "youtube-dl"
Wed Apr 17 10:10:33 2019 rev:102 rq:694873 version:2019.04.17
Changes:
--------
--- /work/SRC/openSUSE:Factory/youtube-dl/python-youtube-dl.changes 2019-04-02 09:23:59.656777897 +0200
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new.17052/python-youtube-dl.changes 2019-04-17 10:10:41.226934469 +0200
@@ -1,0 +2,26 @@
+Tue Apr 16 22:12:01 UTC 2019 - Jan Engelhardt
+
+- Update to new upstream release 2019.04.17
+ * openload: Randomize User-Agent
+  * yahoo: add support for GYAO episode URLs
+ * yahoo: add support for streaming.yahoo.co.jp
+ * cbs: extract smpte and vtt subtitles
+ * streamango: add support for streamcherry.com
+
+-------------------------------------------------------------------
+Sat Apr 6 21:30:38 UTC 2019 - Jan Engelhardt
+
+- Update to new upstream release 2019.04.07
+ * mediasite: Add support for dashed ids and named catalogs
+ * YoutubeDL: Add ffmpeg_location to post processor options
+ * gaia: add support for authentication
+ * adobeconnect: Add new extractor
+ * vk: use a more unique video id
+ * adn: fix extraction and add support for positioning styles
+  * teamcoco: fix extraction and add support for subdomains
+ * youtube: extract srv[1-3] subtitle formats
+ * hbo: fix extraction and extract subtitles
+ * vrv: add basic support for individual movie links
+ * ruutu: Add support for audio podcasts
+
+-------------------------------------------------------------------
--- /work/SRC/openSUSE:Factory/youtube-dl/youtube-dl.changes 2019-04-08 10:40:35.379299132 +0200
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new.17052/youtube-dl.changes 2019-04-17 10:10:52.966946978 +0200
@@ -1,0 +2,10 @@
+Tue Apr 16 22:12:01 UTC 2019 - Jan Engelhardt
+
+- Update to new upstream release 2019.04.17
+ * openload: Randomize User-Agent
+  * yahoo: add support for GYAO episode URLs
+ * yahoo: add support for streaming.yahoo.co.jp
+ * cbs: extract smpte and vtt subtitles
+ * streamango: add support for streamcherry.com
+
+-------------------------------------------------------------------
Old:
----
youtube-dl-2019.04.07.tar.gz
youtube-dl-2019.04.07.tar.gz.sig
New:
----
youtube-dl-2019.04.17.tar.gz
youtube-dl-2019.04.17.tar.gz.sig
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.B1dJez/_old 2019-04-17 10:10:57.278951572 +0200
+++ /var/tmp/diff_new_pack.B1dJez/_new 2019-04-17 10:10:57.282951576 +0200
@@ -19,7 +19,7 @@
%define modname youtube-dl
%{?!python_module:%define python_module() python-%{**} python3-%{**}}
Name: python-youtube-dl
-Version: 2019.04.07
+Version: 2019.04.17
Release: 0
Summary: A python module for downloading from video sites for offline watching
License: SUSE-Public-Domain AND CC-BY-SA-3.0
++++++ youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.B1dJez/_old 2019-04-17 10:10:57.302951597 +0200
+++ /var/tmp/diff_new_pack.B1dJez/_new 2019-04-17 10:10:57.306951602 +0200
@@ -17,7 +17,7 @@
Name: youtube-dl
-Version: 2019.04.07
+Version: 2019.04.17
Release: 0
Summary: A tool for downloading from video sites for offline watching
License: SUSE-Public-Domain AND CC-BY-SA-3.0
++++++ youtube-dl-2019.04.07.tar.gz -> youtube-dl-2019.04.17.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/ChangeLog new/youtube-dl/ChangeLog
--- old/youtube-dl/ChangeLog 2019-04-06 23:19:39.000000000 +0200
+++ new/youtube-dl/ChangeLog 2019-04-16 19:20:04.000000000 +0200
@@ -1,3 +1,28 @@
+version 2019.04.17
+
+Extractors
+* [openload] Randomize User-Agent (closes #20688)
++ [openload] Add support for oladblock domains (#20471)
+* [adn] Fix subtitle extraction (#12724)
++ [aol] Add support for localized websites
++ [yahoo] Add support GYAO episode URLs
++ [yahoo] Add support for streaming.yahoo.co.jp (#5811, #7098)
++ [yahoo] Add support for gyao.yahoo.co.jp
+* [aenetworks] Fix history topic extraction and extract more formats
++ [cbs] Extract smpte and vtt subtitles
++ [streamango] Add support for streamcherry.com (#20592)
++ [yourporn] Add support for sxyprn.com (#20646)
+* [mgtv] Fix extraction (#20650)
+* [linkedin:learning] Use urljoin for form action URL (#20431)
++ [gdc] Add support for kaltura embeds (#20575)
+* [dispeak] Improve mp4 bitrate extraction
+* [kaltura] Sanitize embed URLs
+* [jwplatfom] Do not match manifest URLs (#20596)
+* [aol] Restrict URL regular expression and improve format extraction
++ [tiktok] Add support for new URL schema (#20573)
++ [stv:player] Add support for player.stv.tv (#20586)
+
+
version 2019.04.07
Core
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/README.md new/youtube-dl/README.md
--- old/youtube-dl/README.md 2019-04-06 23:19:44.000000000 +0200
+++ new/youtube-dl/README.md 2019-04-16 19:20:08.000000000 +0200
@@ -700,7 +700,7 @@
# Download best mp4 format available or any other best if no mp4 available
$ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
-# Download best format available but not better that 480p
+# Download best format available but no better than 480p
$ youtube-dl -f 'bestvideo[height<=480]+bestaudio/best[height<=480]'
# Download best video only format but no bigger than 50 MB
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/README.txt new/youtube-dl/README.txt
--- old/youtube-dl/README.txt 2019-04-06 23:20:32.000000000 +0200
+++ new/youtube-dl/README.txt 2019-04-16 19:20:50.000000000 +0200
@@ -892,7 +892,7 @@
# Download best mp4 format available or any other best if no mp4 available
$ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
- # Download best format available but not better that 480p
+ # Download best format available but no better than 480p
$ youtube-dl -f 'bestvideo[height<=480]+bestaudio/best[height<=480]'
# Download best video only format but no bigger than 50 MB
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/docs/supportedsites.md new/youtube-dl/docs/supportedsites.md
--- old/youtube-dl/docs/supportedsites.md 2019-04-06 23:19:46.000000000 +0200
+++ new/youtube-dl/docs/supportedsites.md 2019-04-16 19:20:09.000000000 +0200
@@ -46,6 +46,7 @@
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
- **AnimeOnDemand**
- **Anvato**
+ - **aol.com**
- **APA**
- **Aparat**
- **AppleConnect**
@@ -632,7 +633,6 @@
- **OdaTV**
- **Odnoklassniki**
- **OktoberfestTV**
- - **on.aol.com**
- **OnDemandKorea**
- **onet.pl**
- **onet.tv**
@@ -853,6 +853,7 @@
- **StreamCZ**
- **StreetVoice**
- **StretchInternet**
+ - **stv:player**
- **SunPorno**
- **SVT**
- **SVTPage**
@@ -1124,6 +1125,8 @@
- **XVideos**
- **XXXYMovies**
- **Yahoo**: Yahoo screen and movies
+ - **yahoo:gyao**
+ - **yahoo:gyao:player**
- **YandexDisk**
- **yandexmusic:album**: Яндекс.Музыка - Альбом
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
Binary files old/youtube-dl/youtube-dl and new/youtube-dl/youtube-dl differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube-dl.1 new/youtube-dl/youtube-dl.1
--- old/youtube-dl/youtube-dl.1 2019-04-06 23:20:35.000000000 +0200
+++ new/youtube-dl/youtube-dl.1 2019-04-16 19:20:52.000000000 +0200
@@ -1471,7 +1471,7 @@
#\ Download\ best\ mp4\ format\ available\ or\ any\ other\ best\ if\ no\ mp4\ available
$\ youtube\-dl\ \-f\ \[aq]bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best\[aq]
-#\ Download\ best\ format\ available\ but\ not\ better\ that\ 480p
+#\ Download\ best\ format\ available\ but\ no\ better\ than\ 480p
$\ youtube\-dl\ \-f\ \[aq]bestvideo[height<=480]+bestaudio/best[height<=480]\[aq]
#\ Download\ best\ video\ only\ format\ but\ no\ bigger\ than\ 50\ MB
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/adn.py new/youtube-dl/youtube_dl/extractor/adn.py
--- old/youtube-dl/youtube_dl/extractor/adn.py 2019-04-06 23:19:12.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/adn.py 2019-04-08 01:24:14.000000000 +0200
@@ -60,14 +60,19 @@
enc_subtitles = self._download_webpage(
urljoin(self._BASE_URL, sub_path),
- video_id, 'Downloading subtitles data', fatal=False)
+ video_id, 'Downloading subtitles location', fatal=False) or '{}'
+ subtitle_location = (self._parse_json(enc_subtitles, video_id, fatal=False) or {}).get('location')
+ if subtitle_location:
+ enc_subtitles = self._download_webpage(
+ urljoin(self._BASE_URL, subtitle_location),
+ video_id, 'Downloading subtitles data', fatal=False)
if not enc_subtitles:
return None
# http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
- bytes_to_intlist(binascii.unhexlify(self._K + '083db5aebd9353b4')),
+ bytes_to_intlist(binascii.unhexlify(self._K + '4421de0a5f0814ba')),
bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
))
subtitles_json = self._parse_json(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/aenetworks.py new/youtube-dl/youtube_dl/extractor/aenetworks.py
--- old/youtube-dl/youtube_dl/extractor/aenetworks.py 2019-04-06 23:18:59.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/aenetworks.py 2019-04-08 01:24:14.000000000 +0200
@@ -1,14 +1,15 @@
+# coding: utf-8
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE
from ..utils import (
+ extract_attributes,
+ ExtractorError,
+ int_or_none,
smuggle_url,
update_url_query,
- unescapeHTML,
- extract_attributes,
- get_element_by_attribute,
)
from ..compat import (
compat_urlparse,
@@ -19,6 +20,43 @@
_THEPLATFORM_KEY = 'crazyjava'
_THEPLATFORM_SECRET = 's3cr3t'
+ def _extract_aen_smil(self, smil_url, video_id, auth=None):
+ query = {'mbr': 'true'}
+ if auth:
+ query['auth'] = auth
+ TP_SMIL_QUERY = [{
+ 'assetTypes': 'high_video_ak',
+ 'switch': 'hls_high_ak'
+ }, {
+ 'assetTypes': 'high_video_s3'
+ }, {
+ 'assetTypes': 'high_video_s3',
+ 'switch': 'hls_ingest_fastly'
+ }]
+ formats = []
+ subtitles = {}
+ last_e = None
+ for q in TP_SMIL_QUERY:
+ q.update(query)
+ m_url = update_url_query(smil_url, q)
+ m_url = self._sign_url(m_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
+ try:
+ tp_formats, tp_subtitles = self._extract_theplatform_smil(
+ m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes']))
+ except ExtractorError as e:
+ last_e = e
+ continue
+ formats.extend(tp_formats)
+ subtitles = self._merge_subtitles(subtitles, tp_subtitles)
+ if last_e and not formats:
+ raise last_e
+ self._sort_formats(formats)
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
class AENetworksIE(AENetworksBaseIE):
IE_NAME = 'aenetworks'
@@ -33,22 +71,25 @@
(?:
shows/(?P[^/]+(?:/[^/]+){0,2})|
movies/(?P[^/]+)(?:/full-movie)?|
- specials/(?P[^/]+)/full-special|
+ specials/(?P[^/]+)/(?:full-special|preview-)|
collections/[^/]+/(?P[^/]+)
)
'''
_TESTS = [{
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
- 'md5': 'a97a65f7e823ae10e9244bc5433d5fe6',
'info_dict': {
'id': '22253814',
'ext': 'mp4',
- 'title': 'Winter Is Coming',
+ 'title': 'Winter is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
'timestamp': 1338306241,
'upload_date': '20120529',
'uploader': 'AENE-NEW',
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.history.com/shows/ancient-aliens/season-1',
@@ -84,6 +125,9 @@
}, {
'url': 'https://www.historyvault.com/collections/america-the-story-of-us/westward',
'only_matching': True
+ }, {
+ 'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/prev...',
+ 'only_matching': True
}]
_DOMAIN_TO_REQUESTOR_ID = {
'history.com': 'HISTORY',
@@ -124,11 +168,6 @@
return self.playlist_result(
entries, self._html_search_meta('aetn:SeasonId', webpage))
- query = {
- 'mbr': 'true',
- 'assetTypes': 'high_video_ak',
- 'switch': 'hls_high_ak',
- }
video_id = self._html_search_meta('aetn:VideoID', webpage)
media_url = self._search_regex(
[r"media_url\s*=\s*'(?P<url>[^']+)'",
@@ -138,64 +177,39 @@
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
info = self._parse_theplatform_metadata(theplatform_metadata)
+ auth = None
if theplatform_metadata.get('AETN$isBehindWall'):
requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
resource = self._get_mvpd_resource(
requestor_id, theplatform_metadata['title'],
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
theplatform_metadata['ratings'][0]['rating'])
- query['auth'] = self._extract_mvpd_auth(
+ auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
info.update(self._search_json_ld(webpage, video_id, fatal=False))
- media_url = update_url_query(media_url, query)
- media_url = self._sign_url(media_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
- formats, subtitles = self._extract_theplatform_smil(media_url, video_id)
- self._sort_formats(formats)
- info.update({
- 'id': video_id,
- 'formats': formats,
- 'subtitles': subtitles,
- })
+ info.update(self._extract_aen_smil(media_url, video_id, auth))
return info
class HistoryTopicIE(AENetworksBaseIE):
IE_NAME = 'history:topic'
IE_DESC = 'History.com Topic'
- _VALID_URL = r'https?://(?:www\.)?history\.com/topics/(?:[^/]+/)?(?P[^/]+)(?:/[^/]+(?:/(?P[^/?#]+))?)?'
+ _VALID_URL = r'https?://(?:www\.)?history\.com/topics/[^/]+/(?P<id>[\w+-]+?)-video'
_TESTS = [{
- 'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
+ 'url': 'https://www.history.com/topics/valentines-day/history-of-valentines-day-vide...',
'info_dict': {
'id': '40700995724',
'ext': 'mp4',
- 'title': "Bet You Didn't Know: Valentine's Day",
+ 'title': "History of Valentine’s Day",
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
'timestamp': 1375819729,
'upload_date': '20130806',
- 'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
- }, {
- 'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/videos',
- 'info_dict':
- {
- 'id': 'world-war-i-history',
- 'title': 'World War I History',
- },
- 'playlist_mincount': 23,
- }, {
- 'url': 'http://www.history.com/topics/world-war-i-history/videos',
- 'only_matching': True,
- }, {
- 'url': 'http://www.history.com/topics/world-war-i/world-war-i-history',
- 'only_matching': True,
- }, {
- 'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/speeches',
- 'only_matching': True,
}]
def theplatform_url_result(self, theplatform_url, video_id, query):
@@ -215,27 +229,19 @@
}
def _real_extract(self, url):
- topic_id, video_display_id = re.match(self._VALID_URL, url).groups()
- if video_display_id:
- webpage = self._download_webpage(url, video_display_id)
- release_url, video_id = re.search(r"_videoPlayer.play\('([^']+)'\s*,\s*'[^']+'\s*,\s*'(\d+)'\)", webpage).groups()
- release_url = unescapeHTML(release_url)
-
- return self.theplatform_url_result(
- release_url, video_id, {
- 'mbr': 'true',
- 'switch': 'hls',
- 'assetTypes': 'high_video_ak',
- })
- else:
- webpage = self._download_webpage(url, topic_id)
- entries = []
- for episode_item in re.findall(r']*>', webpage):
- video_attributes = extract_attributes(episode_item)
- entries.append(self.theplatform_url_result(
- video_attributes['data-release-url'], video_attributes['data-id'], {
- 'mbr': 'true',
- 'switch': 'hls',
- 'assetTypes': 'high_video_ak',
- }))
- return self.playlist_result(entries, topic_id, get_element_by_attribute('class', 'show-title', webpage))
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ video_id = self._search_regex(
+ r']+src="[^"]+\btpid=(\d+)', webpage, 'tpid')
+ result = self._download_json(
+ 'https://feeds.video.aetnd.com/api/v2/history/videos',
+ video_id, query={'filter[id]': video_id})['results'][0]
+ title = result['title']
+ info = self._extract_aen_smil(result['publicUrl'], video_id)
+ info.update({
+ 'title': title,
+ 'description': result.get('description'),
+ 'duration': int_or_none(result.get('duration')),
+ 'timestamp': int_or_none(result.get('added'), 1000),
+ })
+ return info
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/aol.py new/youtube-dl/youtube_dl/extractor/aol.py
--- old/youtube-dl/youtube_dl/extractor/aol.py 2019-04-06 23:18:59.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/aol.py 2019-04-08 01:24:14.000000000 +0200
@@ -4,6 +4,10 @@
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+)
from ..utils import (
ExtractorError,
int_or_none,
@@ -12,12 +16,12 @@
class AolIE(InfoExtractor):
- IE_NAME = 'on.aol.com'
- _VALID_URL = r'(?:aol-video:|https?://(?:(?:www|on)\.)?aol\.com/(?:[^/]+/)*(?:[^/?#&]+-)?)(?P<id>[^/?#&]+)'
+ IE_NAME = 'aol.com'
+ _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>[0-9a-f]+)'
_TESTS = [{
# video with 5min ID
- 'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-5...',
+ 'url': 'https://www.aol.com/video/view/u-s--official-warns-of-largest-ever-irs-phone...',
'md5': '18ef68f48740e86ae94b98da815eec42',
'info_dict': {
'id': '518167793',
@@ -34,7 +38,7 @@
}
}, {
# video with vidible ID
- 'url': 'http://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04f...',
+ 'url': 'https://www.aol.com/video/view/netflix-is-raising-rates/5707d6b8e4b090497b04...',
'info_dict': {
'id': '5707d6b8e4b090497b04f706',
'ext': 'mp4',
@@ -49,16 +53,28 @@
'skip_download': True,
}
}, {
- 'url': 'http://on.aol.com/partners/abc-551438d309eab105804dbfe8/sneak-peek-was-haley...',
+ 'url': 'https://www.aol.com/video/view/park-bench-season-2-trailer/559a1b9be4b0c3bfa...',
'only_matching': True,
}, {
- 'url': 'http://on.aol.com/shows/park-bench-shw518173474-559a1b9be4b0c3bfad3357a7?con...',
+ 'url': 'https://www.aol.com/video/view/donald-trump-spokeswoman-tones-down-megyn-kel...',
'only_matching': True,
}, {
- 'url': 'http://on.aol.com/video/519442220',
+ 'url': 'aol-video:5707d6b8e4b090497b04f706',
'only_matching': True,
}, {
- 'url': 'aol-video:5707d6b8e4b090497b04f706',
+ 'url': 'https://www.aol.com/video/playlist/PL8245/5ca79d19d21f1a04035db606/',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.aol.ca/video/view/u-s-woman-s-family-arrested-for-murder-first-p...',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.aol.co.uk/video/view/-one-dead-and-22-hurt-in-bus-crash-/5cb3a6f...',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.aol.de/video/view/eva-braun-privataufnahmen-von-hitlers-geliebte...',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.aol.jp/video/playlist/5a28e936a1334d000137da0c/5a28f3151e642219f...',
'only_matching': True,
}]
@@ -73,7 +89,7 @@
video_data = response['data']
formats = []
- m3u8_url = video_data.get('videoMasterPlaylist')
+ m3u8_url = url_or_none(video_data.get('videoMasterPlaylist'))
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
@@ -96,6 +112,12 @@
'width': int(mobj.group(1)),
'height': int(mobj.group(2)),
})
+ else:
+ qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
+ f.update({
+ 'width': int_or_none(qs.get('w', [None])[0]),
+ 'height': int_or_none(qs.get('h', [None])[0]),
+ })
formats.append(f)
self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/cbs.py new/youtube-dl/youtube_dl/extractor/cbs.py
--- old/youtube-dl/youtube_dl/extractor/cbs.py 2019-04-06 23:19:00.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/cbs.py 2019-04-08 01:24:14.000000000 +0200
@@ -13,13 +13,17 @@
class CBSBaseIE(ThePlatformFeedIE):
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
- closed_caption_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', 'ClosedCaptionURL')
- return {
- 'en': [{
- 'ext': 'ttml',
- 'url': closed_caption_e.attrib['value'],
- }]
- } if closed_caption_e is not None and closed_caption_e.attrib.get('value') else []
+ subtitles = {}
+ for k, ext in [('sMPTE-TTCCURL', 'tt'), ('ClosedCaptionURL', 'ttml'), ('webVTTCaptionURL', 'vtt')]:
+ cc_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', k)
+ if cc_e is not None:
+ cc_url = cc_e.get('value')
+ if cc_url:
+ subtitles.setdefault(subtitles_lang, []).append({
+ 'ext': ext,
+ 'url': cc_url,
+ })
+ return subtitles
class CBSIE(CBSBaseIE):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/dispeak.py new/youtube-dl/youtube_dl/extractor/dispeak.py
--- old/youtube-dl/youtube_dl/extractor/dispeak.py 2019-04-06 23:19:00.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/dispeak.py 2019-04-08 01:24:15.000000000 +0200
@@ -58,10 +58,17 @@
stream_name = xpath_text(a_format, 'streamName', fatal=True)
video_path = re.match(r'mp4\:(?P<path>.*)', stream_name).group('path')
url = video_root + video_path
- vbr = xpath_text(a_format, 'bitrate')
+ bitrate = xpath_text(a_format, 'bitrate')
+ tbr = int_or_none(bitrate)
+ vbr = int_or_none(self._search_regex(
+ r'-(\d+)\.mp4', video_path, 'vbr', default=None))
+ abr = tbr - vbr if tbr and vbr else None
video_formats.append({
+ 'format_id': bitrate,
'url': url,
- 'vbr': int_or_none(vbr),
+ 'tbr': tbr,
+ 'vbr': vbr,
+ 'abr': abr,
})
return video_formats
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/extractors.py new/youtube-dl/youtube_dl/extractor/extractors.py
--- old/youtube-dl/youtube_dl/extractor/extractors.py 2019-04-06 23:19:12.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/extractors.py 2019-04-08 01:24:15.000000000 +0200
@@ -1093,6 +1093,7 @@
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .stretchinternet import StretchInternetIE
+from .stv import STVPlayerIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
@@ -1451,6 +1452,8 @@
from .yahoo import (
YahooIE,
YahooSearchIE,
+ YahooGyaOPlayerIE,
+ YahooGyaOIE,
)
from .yandexdisk import YandexDiskIE
from .yandexmusic import (
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/gdcvault.py new/youtube-dl/youtube_dl/extractor/gdcvault.py
--- old/youtube-dl/youtube_dl/extractor/gdcvault.py 2019-04-06 23:19:00.000000000 +0200
+++ new/youtube-dl/youtube_dl/extractor/gdcvault.py 2019-04-08 01:24:15.000000000 +0200
@@ -3,22 +3,24 @@
import re
from .common import InfoExtractor
+from .kaltura import KalturaIE
from ..utils import (
HEADRequest,
sanitized_Request,
+ smuggle_url,
urlencode_postdata,
)
class GDCVaultIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)?'
+ _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)(?:/(?P<name>[\w-]+))?'
_NETRC_MACHINE = 'gdcvault'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
- 'id': '1019721',
+ 'id': '201311826596_AWNY',
'display_id': 'Doki-Doki-Universe-Sweet-Simple',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
@@ -27,7 +29,7 @@
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
- 'id': '1015683',
+ 'id': '201203272_1330951438328RSXR',
'display_id': 'Embracing-the-Dark-Art-of',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
@@ -56,7 +58,7 @@
'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface',
'md5': 'a8efb6c31ed06ca8739294960b2dbabd',
'info_dict': {
- 'id': '1023460',
+ 'id': '840376_BQRC',
'ext': 'mp4',
'display_id': 'Tenacious-Design-and-The-Interface',
'title': 'Tenacious Design and The Interface of \'Destiny\'',
@@ -66,26 +68,38 @@
# Multiple audios
'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC',
'info_dict': {
- 'id': '1014631',
- 'ext': 'flv',
+ 'id': '12396_1299111843500GMPX',
+ 'ext': 'mp4',
'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man',
},
- 'params': {
- 'skip_download': True, # Requires rtmpdump
- 'format': 'jp', # The japanese audio
- }
+ # 'params': {
+ # 'skip_download': True, # Requires rtmpdump
+ # 'format': 'jp', # The japanese audio
+ # }
},
{
# gdc-player.html
'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo',
'info_dict': {
- 'id': '1435',
+ 'id': '9350_1238021887562UHXB',
'display_id': 'An-American-engine-in-Tokyo',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT',
},
+ },
+ {
+ # Kaltura Embed
+ 'url': 'https://www.gdcvault.com/play/1026180/Mastering-the-Apex-of-Scaling',
+ 'info_dict': {
+ 'id': '0_h1fg8j3p',
+ 'ext': 'mp4',
+ 'title': 'Mastering the Apex of Scaling Game Servers (Presented by Multiplay)',
+ 'timestamp': 1554401811,
+ 'upload_date': '20190404',
+ 'uploader_id': 'joe@blazestreaming.com',
+ },
'params': {
- 'skip_download': True, # Requires rtmpdump
+ 'format': 'mp4-408',
},
},
]
@@ -114,10 +128,8 @@
return start_page
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
-
- video_id = mobj.group('id')
- display_id = mobj.group('name') or video_id
+ video_id, name = re.match(self._VALID_URL, url).groups()
+ display_id = name or video_id
webpage_url = 'http://www.gdcvault.com/play/' + video_id
start_page = self._download_webpage(webpage_url, display_id)
@@ -127,12 +139,12 @@
start_page, 'url', default=None)
if direct_url:
title = self._html_search_regex(
- r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
+ r'<td><strong>Session Name:?</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
video_url = 'http://www.gdcvault.com' + direct_url
# resolve the url so that we can detect the correct extension
- head = self._request_webpage(HEADRequest(video_url), video_id)
- video_url = head.geturl()
+ video_url = self._request_webpage(
+ HEADRequest(video_url), video_id).geturl()
return {
'id': video_id,
@@ -141,34 +153,36 @@
'title': title,
}
- PLAYER_REGEX = r'