From: Erwin de Haan
Date: Sat, 6 Feb 2016 22:37:55 +0000 (+0100)
Subject: [vlive] Updated to new V App/VLive api.
X-Git-Url: http://git.oshgnacknak.de/?a=commitdiff_plain;h=b8b465af3e83fb19c1818c2fa83f0c5f753dd917;p=youtube-dl

[vlive] Updated to new V App/VLive api.

More robust with getting keys and ids from website.
---

diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
index 86c1cb5ef..3e1f8ef07 100644
--- a/youtube_dl/extractor/vlive.py
+++ b/youtube_dl/extractor/vlive.py
@@ -9,17 +9,18 @@ from time import time
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
-    determine_ext
+    determine_ext,
+    int_or_none
 )
 from ..compat import compat_urllib_parse
 
 
 class VLiveIE(InfoExtractor):
     IE_NAME = 'vlive'
-    # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
+    # vlive.tv/video/ links redirect to www.vlive.tv/video/
     _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
     _TEST = {
-        'url': 'http://m.vlive.tv/video/1326',
+        'url': 'http://www.vlive.tv/video/1326',
         'md5': 'cc7314812855ce56de70a06a27314983',
         'info_dict': {
             'id': '1326',
@@ -28,50 +29,45 @@ class VLiveIE(InfoExtractor):
             'creator': 'Girl\'s Day',
         },
     }
-    _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
         webpage = self._download_webpage(
-            'http://m.vlive.tv/video/%s' % video_id,
+            'http://www.vlive.tv/video/%s' % video_id,
             video_id, note='Download video page')
 
+        long_video_id = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"([^"]+)",\s?"[^"]+",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'long_video_id')
+
+        key = self._search_regex(
+            r'vlive\.tv\.video\.ajax\.request\.handler\.init\("[0-9]+",\s?"[^"]*",\s?"[^"]+",\s?"([^"]+)",\s?"[^"]*",\s?"[^"]*"\)', webpage, 'key')
+
         title = self._og_search_title(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
         creator = self._html_search_regex(
-            r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+            r'<div[^>]*>\s*<span[^>]+class="name">([^<>]+)', webpage, 'creator', fatal=False)
 
-        url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
-        msgpad = '%.0f' % (time() * 1000)
-        md = b64encode(
-            hmac.new(self._SECRET.encode('ascii'),
-                     (url[:255] + msgpad).encode('ascii'), sha1).digest()
-        )
-        url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
+        # doct = document type (xml or json), cpt = caption type (vtt or ttml)
+        url = "http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt" % (long_video_id, key)
 
         playinfo = self._download_json(url, video_id, 'Downloading video json')
 
-        if playinfo.get('message', '') != 'success':
-            raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
-
-        if not playinfo.get('result'):
-            raise ExtractorError('No videos found.')
-
         formats = []
-        for vid in playinfo['result'].get('videos', {}).get('list', []):
+        for vid in playinfo.get('videos', {}).get('list', []):
             formats.append({
                 'url': vid['source'],
                 'ext': 'mp4',
                 'abr': vid.get('bitrate', {}).get('audio'),
                 'vbr': vid.get('bitrate', {}).get('video'),
-                'format_id': vid['encodingOption']['name'],
-                'height': vid.get('height'),
-                'width': vid.get('width'),
+                'format_id': vid.get('encodingOption', {}).get('name'),
+                'height': int_or_none(vid.get('encodingOption', {}).get('height')),
+                'width': int_or_none(vid.get('encodingOption', {}).get('width')),
             })
         self._sort_formats(formats)
 
         subtitles = {}
-        for caption in playinfo['result'].get('captions', {}).get('list', []):
+        for caption in playinfo.get('captions', {}).get('list', []):
             subtitles[caption['language']] = [
                 {'ext': determine_ext(caption['source'], default_ext='vtt'),
                  'url': caption['source']}]
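
For readers who want to see the new flow end to end, below is a minimal standalone Python 3 sketch of what the patch does: pull long_video_id and key out of the vlive.tv.video.ajax.request.handler.init("...") call on the watch page, then query the vod_play_videoInfo.json endpoint and read videos.list. It only reuses the regexes and URL shown in the diff; the combined named-group regex, the fetch_play_info helper and the plain urllib calls are illustrative additions, and the vlive.tv markup and naver.com endpoint are as of early 2016, so treat this as a sketch of the approach rather than a supported client.

# Standalone sketch of the extraction flow introduced by this patch.
# It mirrors the regexes and the vod_play_videoInfo.json endpoint from the
# diff above; the vlive.tv markup and the naver.com API are as of early 2016
# and may no longer work, so this is illustrative only.
import json
import re
from urllib.request import urlopen

# The patch uses two separate _search_regex calls; here they are folded into
# one pattern with named groups for brevity.
INIT_RE = (
    r'vlive\.tv\.video\.ajax\.request\.handler\.init\('
    r'"(?P<video_id>[0-9]+)",\s?"[^"]*",\s?"(?P<long_video_id>[^"]+)",'
    r'\s?"(?P<key>[^"]+)",\s?"[^"]*",\s?"[^"]*"\)'
)


def fetch_play_info(watch_url):
    # 1. Grab the watch page and pull long_video_id and key out of the
    #    ajax handler init(...) call, as the patch does with _search_regex.
    page = urlopen(watch_url).read().decode('utf-8')
    m = re.search(INIT_RE, page)
    if not m:
        raise RuntimeError('init(...) call not found on the page')
    long_video_id, key = m.group('long_video_id'), m.group('key')

    # 2. Ask the playinfo endpoint for the JSON description of the streams.
    #    doct = document type (json), cpt = caption type (vtt), per the patch.
    api_url = (
        'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json'
        '?videoId=%s&key=%s&ptc=http&doct=json&cpt=vtt' % (long_video_id, key)
    )
    playinfo = json.loads(urlopen(api_url).read().decode('utf-8'))

    # 3. The formats live under videos.list, captions under captions.list.
    return [
        (vid.get('encodingOption', {}).get('name'), vid['source'])
        for vid in playinfo.get('videos', {}).get('list', [])
    ]


if __name__ == '__main__':
    for name, source in fetch_play_info('http://www.vlive.tv/video/1326'):
        print(name, source)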