]> Raphaël G. Git Repositories - youtubedl/blobdiff - youtube_dl/extractor/googlesearch.py
Update upstream source from tag 'upstream/2019.01.16'
[youtubedl] / youtube_dl / extractor / googlesearch.py
index 5c25642702993f1ec344ce9a0d4967fffedc760a..5279fa807f6903fa757c552b3e9ad3e013e5b494 100644 (file)
@@ -4,9 +4,6 @@ import itertools
 import re
 
 from .common import SearchInfoExtractor
-from ..utils import (
-    compat_urllib_parse,
-)
 
 
 class GoogleSearchIE(SearchInfoExtractor):
@@ -14,6 +11,14 @@ class GoogleSearchIE(SearchInfoExtractor):
     _MAX_RESULTS = 1000
     IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
+    _TEST = {
+        'url': 'gvsearch15:python language',
+        'info_dict': {
+            'id': 'python language',
+            'title': 'python language',
+        },
+        'playlist_count': 15,
+    }
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
@@ -26,13 +31,16 @@ class GoogleSearchIE(SearchInfoExtractor):
         }
 
         for pagenum in itertools.count():
-            result_url = (
-                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
-                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
-
             webpage = self._download_webpage(
-                result_url, 'gvsearch:' + query,
-                note='Downloading result page ' + str(pagenum + 1))
+                'http://www.google.com/search',
+                'gvsearch:' + query,
+                note='Downloading result page %s' % (pagenum + 1),
+                query={
+                    'tbm': 'vid',
+                    'q': query,
+                    'start': pagenum * 10,
+                    'hl': 'en',
+                })
 
             for hit_idx, mobj in enumerate(re.finditer(
                     r'<h3 class="r"><a href="([^"]+)"', webpage)):
@@ -46,6 +54,6 @@ class GoogleSearchIE(SearchInfoExtractor):
                     'url': mobj.group(1)
                 })
 
-            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+            if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
                 res['entries'] = entries[:n]
                 return res