youtube-dl

Another place where youtube-dl lives on
git clone git://git.oshgnacknak.de/youtube-dl.git
Log | Files | Refs | README | LICENSE

commit ccf9114e84ded8dd90b01c6c2d4d3ff3b111d7cd
parent 211e17dd436b7626d13195f482133ef223d154bb
Author: Philipp Hagemeister <phihag@phihag.de>
Date:   Thu,  6 Feb 2014 03:29:10 +0100

[googlesearch] Fix start, and skip playlists (Fixes #2329)

Diffstat:
M test/test_playlists.py | 9 +++++++++
M youtube_dl/extractor/googlesearch.py | 38 +++++++++++++++++++++++++-------------
2 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/test/test_playlists.py b/test/test_playlists.py @@ -34,6 +34,7 @@ from youtube_dl.extractor import ( KhanAcademyIE, EveryonesMixtapeIE, RutubeChannelIE, + GoogleSearchIE, GenericIE, ) @@ -240,6 +241,14 @@ class TestPlaylists(unittest.TestCase): self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker') self.assertEqual(len(result['entries']), 3) + def test_GoogleSearch(self): + dl = FakeYDL() + ie = GoogleSearchIE(dl) + result = ie.extract('gvsearch15:python language') + self.assertIsPlaylist(result) + self.assertEqual(result['id'], 'python language') + self.assertEqual(result['title'], 'python language') + self.assertTrue(len(result['entries']) == 15) if __name__ == '__main__': unittest.main() diff --git a/youtube_dl/extractor/googlesearch.py b/youtube_dl/extractor/googlesearch.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import itertools import re @@ -8,32 +10,42 @@ from ..utils import ( class GoogleSearchIE(SearchInfoExtractor): - IE_DESC = u'Google Video search' - _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"' + IE_DESC = 'Google Video search' _MAX_RESULTS = 1000 - IE_NAME = u'video.google:search' + IE_NAME = 'video.google:search' _SEARCH_KEY = 'gvsearch' def _get_n_results(self, query, n): """Get a specified number of results for a query""" + entries = [] res = { '_type': 'playlist', 'id': query, - 'entries': [] + 'title': query, } - for pagenum in itertools.count(1): - result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10) - webpage = self._download_webpage(result_url, u'gvsearch:' + query, - note='Downloading result page ' + str(pagenum)) + for pagenum in itertools.count(): + result_url = ( + 'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' + % (compat_urllib_parse.quote_plus(query), pagenum * 10)) + + webpage = self._download_webpage( + result_url, 'gvsearch:' + query, + 
note='Downloading result page ' + str(pagenum + 1)) + + for hit_idx, mobj in enumerate(re.finditer( + r'<h3 class="r"><a href="([^"]+)"', webpage)): + + # Skip playlists + if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage): + continue - for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage): - e = { + entries.append({ '_type': 'url', 'url': mobj.group(1) - } - res['entries'].append(e) + }) - if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage): + if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage): + res['entries'] = entries[:n] return res