KhanAcademyIE,
EveryonesMixtapeIE,
RutubeChannelIE,
+ GoogleSearchIE,
GenericIE,
)
self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
self.assertEqual(len(result['entries']), 3)
+ def test_GoogleSearch(self):
+ dl = FakeYDL()
+ ie = GoogleSearchIE(dl)
+ result = ie.extract('gvsearch15:python language')
+ self.assertIsPlaylist(result)
+ self.assertEqual(result['id'], 'python language')
+ self.assertEqual(result['title'], 'python language')
+ self.assertTrue(len(result['entries']) == 15)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
+from __future__ import unicode_literals
+
import itertools
import re
class GoogleSearchIE(SearchInfoExtractor):
    """Search extractor for Google Video: handles gvsearch<N>:<query> URLs."""
    IE_DESC = 'Google Video search'
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        found = []
        # Result pages are fetched one at a time; Google paginates by 10.
        for page in itertools.count():
            search_url = (
                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
                % (compat_urllib_parse.quote_plus(query), page * 10))

            webpage = self._download_webpage(
                search_url, 'gvsearch:' + query,
                note='Downloading result page ' + str(page + 1))

            hits = re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage)
            for idx, match in enumerate(hits):
                # Only hits backed by a video thumbnail are real videos;
                # everything else (playlists) is skipped.
                if re.search(r'id="vidthumb%d"' % (idx + 1), webpage):
                    found.append({
                        '_type': 'url',
                        'url': match.group(1),
                    })

            have_enough = len(found) >= n
            on_last_page = not re.search(r'class="pn" id="pnnext"', webpage)
            if have_enough or on_last_page:
                # The query string serves as both the playlist id and title.
                return {
                    '_type': 'playlist',
                    'id': query,
                    'title': query,
                    'entries': found[:n],
                }