Fix GoogleSearchIE (Fixes #822)

This commit is contained in:
Philipp Hagemeister 2013-05-05 20:49:57 +02:00
parent b338f1b154
commit 94ca71b7cc

View file

@ -1494,82 +1494,53 @@ class YoutubeSearchIE(InfoExtractor):
class GoogleSearchIE(InfoExtractor):
    """Information Extractor for Google Video search queries.

    Handles ``gvsearch:QUERY`` (1 result), ``gvsearchN:QUERY`` (N results)
    and ``gvsearchall:QUERY`` (up to _max_google_results) pseudo-URLs and
    returns a playlist of url-type entries pointing at the result pages.
    """
    _VALID_URL = r'gvsearch(?P<prefix>|\d+|all):(?P<query>[\s\S]+)'
    # Presence of this marker in a result page means another page follows.
    _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
    _max_google_results = 1000
    IE_NAME = u'video.google:search'

    def _real_extract(self, query):
        """Parse the search pseudo-URL and dispatch to _download_n_results.

        Raises ExtractorError for a non-positive count; a count above the
        maximum is clamped with a warning.
        """
        mobj = re.match(self._VALID_URL, query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._download_n_results(query, 1)
        elif prefix == 'all':
            return self._download_n_results(query, self._max_google_results)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._max_google_results:
                self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
                n = self._max_google_results
            return self._download_n_results(query, n)

    def _download_n_results(self, query, n):
        """Downloads a specified number of results for a query.

        Returns a playlist dict whose 'entries' hold at most n url results.
        """
        res = {
            '_type': 'playlist',
            'id': query,
            'entries': []
        }

        # BUGFIX: page numbering must start at 0 — Google's 'start' parameter
        # is a zero-based result offset, so counting from 1 silently skipped
        # the first ten results of every search.
        for pagenum in itertools.count():
            result_url = u'http://video.google.com/videosearch?q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum * 10)
            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
                                             note='Downloading result page ' + str(pagenum + 1))

            # Each hit is an <h3 class="r"> heading wrapping the result link.
            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
                res['entries'].append({
                    '_type': 'url',
                    'url': mobj.group(1)
                })

            # Stop once n results are collected or no next page exists.
            # BUGFIX: the old `pagenum * 10 > n` test ignored how many
            # entries were actually found and could return up to 9 extras;
            # cap the playlist at exactly n.
            if len(res['entries']) >= n or not re.search(self._MORE_PAGES_INDICATOR, webpage):
                res['entries'] = res['entries'][:n]
                return res
class YahooSearchIE(InfoExtractor): class YahooSearchIE(InfoExtractor):
"""Information Extractor for Yahoo! Video search queries.""" """Information Extractor for Yahoo! Video search queries."""