Raphaël G. Git Repositories - youtubedl/commitdiff
Imported Upstream version 2014.08.05
authorRogério Brito <rbrito@ime.usp.br>
Wed, 6 Aug 2014 18:35:56 +0000 (15:35 -0300)
committerRogério Brito <rbrito@ime.usp.br>
Wed, 6 Aug 2014 18:35:56 +0000 (15:35 -0300)
92 files changed:
README.md
README.txt
test/helper.py
test/swftests/.gitignore [new file with mode: 0644]
test/swftests/ArrayAccess.as [new file with mode: 0644]
test/swftests/ArrayAccess.swf [new file with mode: 0644]
test/swftests/ClassCall.as [new file with mode: 0644]
test/swftests/ClassCall.swf [new file with mode: 0644]
test/swftests/ClassConstruction.as [new file with mode: 0644]
test/swftests/ClassConstruction.swf [new file with mode: 0644]
test/swftests/LocalVars.as [new file with mode: 0644]
test/swftests/LocalVars.swf [new file with mode: 0644]
test/swftests/PrivateCall.as [new file with mode: 0644]
test/swftests/PrivateCall.swf [new file with mode: 0644]
test/swftests/StaticAssignment.as [new file with mode: 0644]
test/swftests/StaticAssignment.swf [new file with mode: 0644]
test/swftests/StaticRetrieval.as [new file with mode: 0644]
test/swftests/StaticRetrieval.swf [new file with mode: 0644]
test/test_all_urls.py
test/test_download.py
test/test_playlists.py
test/test_swfinterp.py [new file with mode: 0644]
test/test_youtube_signature.py
youtube-dl
youtube-dl.1
youtube-dl.bash-completion
youtube_dl/FileDownloader.py [deleted file]
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/downloader/f4m.py
youtube_dl/extractor/__init__.py
youtube_dl/extractor/abc.py [new file with mode: 0644]
youtube_dl/extractor/adultswim.py [new file with mode: 0644]
youtube_dl/extractor/allocine.py
youtube_dl/extractor/ard.py
youtube_dl/extractor/blinkx.py
youtube_dl/extractor/bloomberg.py
youtube_dl/extractor/br.py
youtube_dl/extractor/cbs.py
youtube_dl/extractor/chilloutzone.py
youtube_dl/extractor/cnet.py
youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/common.py
youtube_dl/extractor/cracked.py [new file with mode: 0644]
youtube_dl/extractor/dfb.py [new file with mode: 0644]
youtube_dl/extractor/dropbox.py
youtube_dl/extractor/firedrive.py
youtube_dl/extractor/francetv.py
youtube_dl/extractor/funnyordie.py
youtube_dl/extractor/gamestar.py [new file with mode: 0644]
youtube_dl/extractor/gdcvault.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/godtube.py [new file with mode: 0644]
youtube_dl/extractor/izlesene.py [new file with mode: 0644]
youtube_dl/extractor/justintv.py
youtube_dl/extractor/kickstarter.py
youtube_dl/extractor/krasview.py [new file with mode: 0644]
youtube_dl/extractor/livestream.py
youtube_dl/extractor/mlb.py [new file with mode: 0644]
youtube_dl/extractor/nbc.py
youtube_dl/extractor/npo.py
youtube_dl/extractor/pbs.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/rtbf.py
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/rtve.py
youtube_dl/extractor/sapo.py [new file with mode: 0644]
youtube_dl/extractor/savefrom.py
youtube_dl/extractor/shared.py [new file with mode: 0644]
youtube_dl/extractor/snotr.py [new file with mode: 0644]
youtube_dl/extractor/sockshare.py [new file with mode: 0644]
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/steam.py
youtube_dl/extractor/streamcloud.py
youtube_dl/extractor/swrmediathek.py
youtube_dl/extractor/tagesschau.py
youtube_dl/extractor/teachertube.py
youtube_dl/extractor/tenplay.py
youtube_dl/extractor/tvplay.py [new file with mode: 0644]
youtube_dl/extractor/ubu.py [new file with mode: 0644]
youtube_dl/extractor/vevo.py
youtube_dl/extractor/vidme.py [new file with mode: 0644]
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/vodlocker.py
youtube_dl/extractor/vube.py
youtube_dl/extractor/wdr.py
youtube_dl/extractor/youtube.py
youtube_dl/jsinterp.py
youtube_dl/postprocessor/ffmpeg.py
youtube_dl/swfinterp.py [new file with mode: 0644]
youtube_dl/utils.py
youtube_dl/version.py

index bc5e0f76df2759b004be225d4419d021a5bcf3a0..a42dfb8567ffb86e926756aec59cbee94cee29bf 100644 (file)
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ To install it right away for all UNIX users (Linux, OS X, etc.), type:
 
 If you do not have curl, you can alternatively use a recent wget:
 
-    sudo wget https://yt-dl.org/downloads/2014.05.13/youtube-dl -O /usr/local/bin/youtube-dl
+    sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
     sudo chmod a+x /usr/local/bin/youtube-dl
 
 Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
@@ -38,12 +38,6 @@ which means you can modify it, redistribute it or use it however you like.
                                      playlist or the command line) if an error
                                      occurs
     --dump-user-agent                display the current browser identification
-    --user-agent UA                  specify a custom user agent
-    --referer REF                    specify a custom referer, use if the video
-                                     access is restricted to one domain
-    --add-header FIELD:VALUE         specify a custom HTTP header and its value,
-                                     separated by a colon ':'. You can use this
-                                     option multiple times
     --list-extractors                List all supported extractors and the URLs
                                      they would handle
     --extractor-descriptions         Output descriptions of all supported
@@ -51,35 +45,22 @@ which means you can modify it, redistribute it or use it however you like.
     --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in
                                      an empty string (--proxy "") for direct
                                      connection
-    --no-check-certificate           Suppress HTTPS certificate validation.
-    --prefer-insecure                Use an unencrypted connection to retrieve
-                                     information about the video. (Currently
-                                     supported only for YouTube)
-    --cache-dir DIR                  Location in the filesystem where youtube-dl
-                                     can store some downloaded information
-                                     permanently. By default $XDG_CACHE_HOME
-                                     /youtube-dl or ~/.cache/youtube-dl . At the
-                                     moment, only YouTube player files (for
-                                     videos with obfuscated signatures) are
-                                     cached, but that may change.
-    --no-cache-dir                   Disable filesystem caching
     --socket-timeout None            Time to wait before giving up, in seconds
-    --bidi-workaround                Work around terminals that lack
-                                     bidirectional text support. Requires bidiv
-                                     or fribidi executable in PATH
     --default-search PREFIX          Use this prefix for unqualified URLs. For
                                      example "gvsearch2:" downloads two videos
                                      from google videos for  youtube-dl "large
                                      apple". Use the value "auto" to let
-                                     youtube-dl guess. The default value "error"
-                                     just throws an error.
+                                     youtube-dl guess ("auto_warning" to emit a
+                                     warning when guessing). "error" just throws
+                                     an error. The default value "fixup_error"
+                                     repairs broken URLs, but emits an error if
+                                     this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given
                                      in the global configuration file /etc
                                      /youtube-dl.conf: do not read the user
                                      configuration in ~/.config/youtube-dl.conf
                                      (%APPDATA%/youtube-dl/config.txt on
                                      Windows)
-    --encoding ENCODING              Force the specified encoding (experimental)
 
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
@@ -125,9 +106,9 @@ which means you can modify it, redistribute it or use it however you like.
                                      of SIZE.
 
 ## Filesystem Options:
-    -t, --title                      use title in file name (default)
+    -a, --batch-file FILE            file containing URLs to download ('-' for
+                                     stdin)
     --id                             use only video ID in file name
-    -l, --literal                    [deprecated] alias of --title
     -A, --auto-number                number downloaded files starting from 00000
     -o, --output TEMPLATE            output filename template. Use %(title)s to
                                      get the title, %(uploader)s for the
@@ -160,18 +141,15 @@ which means you can modify it, redistribute it or use it however you like.
     --restrict-filenames             Restrict filenames to only ASCII
                                      characters, and avoid "&" and spaces in
                                      filenames
-    -a, --batch-file FILE            file containing URLs to download ('-' for
-                                     stdin)
-    --load-info FILE                 json file containing the video information
-                                     (created with the "--write-json" option)
+    -t, --title                      [deprecated] use title in file name
+                                     (default)
+    -l, --literal                    [deprecated] alias of --title
     -w, --no-overwrites              do not overwrite files
     -c, --continue                   force resume of partially downloaded files.
                                      By default, youtube-dl will resume
                                      downloads if possible.
     --no-continue                    do not resume partially downloaded files
                                      (restart from beginning)
-    --cookies FILE                   file to read cookies from and dump cookie
-                                     jar in
     --no-part                        do not use .part files
     --no-mtime                       do not use the Last-modified header to set
                                      the file modification time
@@ -181,6 +159,19 @@ which means you can modify it, redistribute it or use it however you like.
     --write-annotations              write video annotations to a .annotation
                                      file
     --write-thumbnail                write thumbnail image to disk
+    --load-info FILE                 json file containing the video information
+                                     (created with the "--write-json" option)
+    --cookies FILE                   file to read cookies from and dump cookie
+                                     jar in
+    --cache-dir DIR                  Location in the filesystem where youtube-dl
+                                     can store some downloaded information
+                                     permanently. By default $XDG_CACHE_HOME
+                                     /youtube-dl or ~/.cache/youtube-dl . At the
+                                     moment, only YouTube player files (for
+                                     videos with obfuscated signatures) are
+                                     cached, but that may change.
+    --no-cache-dir                   Disable filesystem caching
+    --rm-cache-dir                   Delete all filesystem cache files
 
 ## Verbosity / Simulation Options:
     -q, --quiet                      activates quiet mode
@@ -210,6 +201,22 @@ which means you can modify it, redistribute it or use it however you like.
                                      problems
     --print-traffic                  Display sent and read HTTP traffic
 
+## Workarounds:
+    --encoding ENCODING              Force the specified encoding (experimental)
+    --no-check-certificate           Suppress HTTPS certificate validation.
+    --prefer-insecure                Use an unencrypted connection to retrieve
+                                     information about the video. (Currently
+                                     supported only for YouTube)
+    --user-agent UA                  specify a custom user agent
+    --referer REF                    specify a custom referer, use if the video
+                                     access is restricted to one domain
+    --add-header FIELD:VALUE         specify a custom HTTP header and its value,
+                                     separated by a colon ':'. You can use this
+                                     option multiple times
+    --bidi-workaround                Work around terminals that lack
+                                     bidirectional text support. Requires bidiv
+                                     or fribidi executable in PATH
+
 ## Video Format Options:
     -f, --format FORMAT              video format code, specify the order of
                                      preference using slashes: "-f 22/17/18".
index 5555b2a72b8d6b228e606a862e5e2a13b6698db9..4f41be250632c9edaf722ae2fa402180d376f8a8 100644 (file)
@@ -15,7 +15,7 @@ To install it right away for all UNIX users (Linux, OS X, etc.), type:
 
 If you do not have curl, you can alternatively use a recent wget:
 
-    sudo wget https://yt-dl.org/downloads/2014.05.13/youtube-dl -O /usr/local/bin/youtube-dl
+    sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
     sudo chmod a+x /usr/local/bin/youtube-dl
 
 Windows users can download a .exe file and place it in their home
@@ -50,12 +50,6 @@ OPTIONS
                                      playlist or the command line) if an error
                                      occurs
     --dump-user-agent                display the current browser identification
-    --user-agent UA                  specify a custom user agent
-    --referer REF                    specify a custom referer, use if the video
-                                     access is restricted to one domain
-    --add-header FIELD:VALUE         specify a custom HTTP header and its value,
-                                     separated by a colon ':'. You can use this
-                                     option multiple times
     --list-extractors                List all supported extractors and the URLs
                                      they would handle
     --extractor-descriptions         Output descriptions of all supported
@@ -63,35 +57,22 @@ OPTIONS
     --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in
                                      an empty string (--proxy "") for direct
                                      connection
-    --no-check-certificate           Suppress HTTPS certificate validation.
-    --prefer-insecure                Use an unencrypted connection to retrieve
-                                     information about the video. (Currently
-                                     supported only for YouTube)
-    --cache-dir DIR                  Location in the filesystem where youtube-dl
-                                     can store some downloaded information
-                                     permanently. By default $XDG_CACHE_HOME
-                                     /youtube-dl or ~/.cache/youtube-dl . At the
-                                     moment, only YouTube player files (for
-                                     videos with obfuscated signatures) are
-                                     cached, but that may change.
-    --no-cache-dir                   Disable filesystem caching
     --socket-timeout None            Time to wait before giving up, in seconds
-    --bidi-workaround                Work around terminals that lack
-                                     bidirectional text support. Requires bidiv
-                                     or fribidi executable in PATH
     --default-search PREFIX          Use this prefix for unqualified URLs. For
                                      example "gvsearch2:" downloads two videos
                                      from google videos for  youtube-dl "large
                                      apple". Use the value "auto" to let
-                                     youtube-dl guess. The default value "error"
-                                     just throws an error.
+                                     youtube-dl guess ("auto_warning" to emit a
+                                     warning when guessing). "error" just throws
+                                     an error. The default value "fixup_error"
+                                     repairs broken URLs, but emits an error if
+                                     this is not possible instead of searching.
     --ignore-config                  Do not read configuration files. When given
                                      in the global configuration file /etc
                                      /youtube-dl.conf: do not read the user
                                      configuration in ~/.config/youtube-dl.conf
                                      (%APPDATA%/youtube-dl/config.txt on
                                      Windows)
-    --encoding ENCODING              Force the specified encoding (experimental)
 
 Video Selection:
 ----------------
@@ -143,9 +124,9 @@ Download Options:
 Filesystem Options:
 -------------------
 
-    -t, --title                      use title in file name (default)
+    -a, --batch-file FILE            file containing URLs to download ('-' for
+                                     stdin)
     --id                             use only video ID in file name
-    -l, --literal                    [deprecated] alias of --title
     -A, --auto-number                number downloaded files starting from 00000
     -o, --output TEMPLATE            output filename template. Use %(title)s to
                                      get the title, %(uploader)s for the
@@ -178,18 +159,15 @@ Filesystem Options:
     --restrict-filenames             Restrict filenames to only ASCII
                                      characters, and avoid "&" and spaces in
                                      filenames
-    -a, --batch-file FILE            file containing URLs to download ('-' for
-                                     stdin)
-    --load-info FILE                 json file containing the video information
-                                     (created with the "--write-json" option)
+    -t, --title                      [deprecated] use title in file name
+                                     (default)
+    -l, --literal                    [deprecated] alias of --title
     -w, --no-overwrites              do not overwrite files
     -c, --continue                   force resume of partially downloaded files.
                                      By default, youtube-dl will resume
                                      downloads if possible.
     --no-continue                    do not resume partially downloaded files
                                      (restart from beginning)
-    --cookies FILE                   file to read cookies from and dump cookie
-                                     jar in
     --no-part                        do not use .part files
     --no-mtime                       do not use the Last-modified header to set
                                      the file modification time
@@ -199,6 +177,19 @@ Filesystem Options:
     --write-annotations              write video annotations to a .annotation
                                      file
     --write-thumbnail                write thumbnail image to disk
+    --load-info FILE                 json file containing the video information
+                                     (created with the "--write-json" option)
+    --cookies FILE                   file to read cookies from and dump cookie
+                                     jar in
+    --cache-dir DIR                  Location in the filesystem where youtube-dl
+                                     can store some downloaded information
+                                     permanently. By default $XDG_CACHE_HOME
+                                     /youtube-dl or ~/.cache/youtube-dl . At the
+                                     moment, only YouTube player files (for
+                                     videos with obfuscated signatures) are
+                                     cached, but that may change.
+    --no-cache-dir                   Disable filesystem caching
+    --rm-cache-dir                   Delete all filesystem cache files
 
 Verbosity / Simulation Options:
 -------------------------------
@@ -230,6 +221,24 @@ Verbosity / Simulation Options:
                                      problems
     --print-traffic                  Display sent and read HTTP traffic
 
+Workarounds:
+------------
+
+    --encoding ENCODING              Force the specified encoding (experimental)
+    --no-check-certificate           Suppress HTTPS certificate validation.
+    --prefer-insecure                Use an unencrypted connection to retrieve
+                                     information about the video. (Currently
+                                     supported only for YouTube)
+    --user-agent UA                  specify a custom user agent
+    --referer REF                    specify a custom referer, use if the video
+                                     access is restricted to one domain
+    --add-header FIELD:VALUE         specify a custom HTTP header and its value,
+                                     separated by a colon ':'. You can use this
+                                     option multiple times
+    --bidi-workaround                Work around terminals that lack
+                                     bidirectional text support. Requires bidiv
+                                     or fribidi executable in PATH
+
 Video Format Options:
 ---------------------
 
index 230d2bd67ab06b4db552bff30c5620f83673ca93..b7299fb82c2e541fc520ba11c5c52d9edcc972e3 100644 (file)
@@ -137,8 +137,8 @@ def expect_info_dict(self, expected_dict, got_dict):
 
 
 def assertRegexpMatches(self, text, regexp, msg=None):
-    if hasattr(self, 'assertRegexpMatches'):
-        return self.assertRegexpMatches(text, regexp, msg)
+    if hasattr(self, 'assertRegexp'):
+        return self.assertRegexp(text, regexp, msg)
     else:
         m = re.match(regexp, text)
         if not m:
@@ -148,3 +148,10 @@ def assertRegexpMatches(self, text, regexp, msg=None):
             else:
                 msg = note + ', ' + msg
             self.assertTrue(m, msg)
+
+
+def assertGreaterEqual(self, got, expected, msg=None):
+    if not (got >= expected):
+        if msg is None:
+            msg = '%r not greater than or equal to %r' % (got, expected)
+        self.assertTrue(got >= expected, msg)
diff --git a/test/swftests/.gitignore b/test/swftests/.gitignore
new file mode 100644 (file)
index 0000000..da97ff7
--- /dev/null
@@ -0,0 +1 @@
+*.swf
diff --git a/test/swftests/ArrayAccess.as b/test/swftests/ArrayAccess.as
new file mode 100644 (file)
index 0000000..e22caa3
--- /dev/null
@@ -0,0 +1,19 @@
+// input: [["a", "b", "c", "d"]]
+// output: ["c", "b", "a", "d"]
+
+package {
+public class ArrayAccess {
+    public static function main(ar:Array):Array {
+       var aa:ArrayAccess = new ArrayAccess();
+       return aa.f(ar, 2);
+    }
+
+    private function f(ar:Array, num:Number):Array{
+        var x:String = ar[0];
+        var y:String = ar[num % ar.length];
+        ar[0] = y;
+        ar[num] = x;
+        return ar;
+    }
+}
+}
diff --git a/test/swftests/ArrayAccess.swf b/test/swftests/ArrayAccess.swf
new file mode 100644 (file)
index 0000000..3f5b32e
Binary files /dev/null and b/test/swftests/ArrayAccess.swf differ
diff --git a/test/swftests/ClassCall.as b/test/swftests/ClassCall.as
new file mode 100644 (file)
index 0000000..aef58da
--- /dev/null
@@ -0,0 +1,17 @@
+// input: []
+// output: 121
+
+package {
+public class ClassCall {
+    public static function main():int{
+       var f:OtherClass = new OtherClass();
+        return f.func(100,20);
+    }
+}
+}
+
+class OtherClass {
+       public function func(x: int, y: int):int {
+               return x+y+1;
+       }
+}
diff --git a/test/swftests/ClassCall.swf b/test/swftests/ClassCall.swf
new file mode 100644 (file)
index 0000000..2f1b31b
Binary files /dev/null and b/test/swftests/ClassCall.swf differ
diff --git a/test/swftests/ClassConstruction.as b/test/swftests/ClassConstruction.as
new file mode 100644 (file)
index 0000000..436479f
--- /dev/null
@@ -0,0 +1,15 @@
+// input: []
+// output: 0
+
+package {
+public class ClassConstruction {
+    public static function main():int{
+       var f:Foo = new Foo();
+        return 0;
+    }
+}
+}
+
+class Foo {
+
+}
diff --git a/test/swftests/ClassConstruction.swf b/test/swftests/ClassConstruction.swf
new file mode 100644 (file)
index 0000000..51e2ad8
Binary files /dev/null and b/test/swftests/ClassConstruction.swf differ
diff --git a/test/swftests/LocalVars.as b/test/swftests/LocalVars.as
new file mode 100644 (file)
index 0000000..b2911a9
--- /dev/null
@@ -0,0 +1,13 @@
+// input: [1, 2]
+// output: 3
+
+package {
+public class LocalVars {
+    public static function main(a:int, b:int):int{
+        var c:int = a + b + b;
+        var d:int = c - b;
+        var e:int = d;
+        return e;
+    }
+}
+}
diff --git a/test/swftests/LocalVars.swf b/test/swftests/LocalVars.swf
new file mode 100644 (file)
index 0000000..3e9344f
Binary files /dev/null and b/test/swftests/LocalVars.swf differ
diff --git a/test/swftests/PrivateCall.as b/test/swftests/PrivateCall.as
new file mode 100644 (file)
index 0000000..f1c110a
--- /dev/null
@@ -0,0 +1,21 @@
+// input: []
+// output: 9
+
+package {
+public class PrivateCall {
+    public static function main():int{
+       var f:OtherClass = new OtherClass();
+        return f.func();
+    }
+}
+}
+
+class OtherClass {
+       private function pf():int {
+               return 9;
+       }
+
+       public function func():int {
+               return this.pf();
+       }
+}
diff --git a/test/swftests/PrivateCall.swf b/test/swftests/PrivateCall.swf
new file mode 100644 (file)
index 0000000..55d9eac
Binary files /dev/null and b/test/swftests/PrivateCall.swf differ
diff --git a/test/swftests/StaticAssignment.as b/test/swftests/StaticAssignment.as
new file mode 100644 (file)
index 0000000..b061c21
--- /dev/null
@@ -0,0 +1,13 @@
+// input: [1]
+// output: 1
+
+package {
+public class StaticAssignment {
+       public static var v:int;
+
+    public static function main(a:int):int{
+        v = a;
+        return v;
+    }
+}
+}
diff --git a/test/swftests/StaticAssignment.swf b/test/swftests/StaticAssignment.swf
new file mode 100644 (file)
index 0000000..b28450a
Binary files /dev/null and b/test/swftests/StaticAssignment.swf differ
diff --git a/test/swftests/StaticRetrieval.as b/test/swftests/StaticRetrieval.as
new file mode 100644 (file)
index 0000000..c8352d8
--- /dev/null
@@ -0,0 +1,16 @@
+// input: []
+// output: 1
+
+package {
+public class StaticRetrieval {
+       public static var v:int;
+
+    public static function main():int{
+        if (v) {
+               return 0;
+        } else {
+               return 1;
+        }
+    }
+}
+}
diff --git a/test/swftests/StaticRetrieval.swf b/test/swftests/StaticRetrieval.swf
new file mode 100644 (file)
index 0000000..bb0f3de
Binary files /dev/null and b/test/swftests/StaticRetrieval.swf differ
index 2bc81f0205165068f7c9cbc06634ddfaacbba68b..0ff47cf1ead4a2c89aa24a83023a1d29cde31717 100644 (file)
@@ -15,7 +15,6 @@ from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
     JustinTVIE,
-    PBSIE,
     YoutubeIE,
 )
 
index f171c10bad84a876a9fe4caba2b71a984c3169ec..d6540588c130f6bafacd4ef7d077e6debf8d911d 100644 (file)
@@ -10,7 +10,6 @@ from test.helper import (
     get_params,
     gettestcases,
     expect_info_dict,
-    md5,
     try_rm,
     report_warning,
 )
@@ -24,7 +23,6 @@ import socket
 import youtube_dl.YoutubeDL
 from youtube_dl.utils import (
     compat_http_client,
-    compat_str,
     compat_urllib_error,
     compat_HTTPError,
     DownloadError,
index 1a38a667b1391ab744fa88e91d79e4eea742bbf6..4f188345bf2b9bd7fee5d886cbcfec3ba15dae6e 100644 (file)
@@ -11,6 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
     assertRegexpMatches,
+    assertGreaterEqual,
     expect_info_dict,
     FakeYDL,
 )
@@ -71,8 +72,8 @@ class TestPlaylists(unittest.TestCase):
         ie = DailymotionUserIE(dl)
         result = ie.extract('https://www.dailymotion.com/user/nqtv')
         self.assertIsPlaylist(result)
+        assertGreaterEqual(self, len(result['entries']), 100)
         self.assertEqual(result['title'], 'Rémi Gaillard')
-        self.assertTrue(len(result['entries']) >= 100)
 
     def test_vimeo_channel(self):
         dl = FakeYDL()
@@ -111,7 +112,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VineUserIE(dl)
         result = ie.extract('https://vine.co/Visa')
         self.assertIsPlaylist(result)
-        self.assertTrue(len(result['entries']) >= 47)
+        assertGreaterEqual(self, len(result['entries']), 47)
 
     def test_ustream_channel(self):
         dl = FakeYDL()
@@ -119,7 +120,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://www.ustream.tv/channel/channeljapan')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '10874166')
-        self.assertTrue(len(result['entries']) >= 54)
+        assertGreaterEqual(self, len(result['entries']), 54)
 
     def test_soundcloud_set(self):
         dl = FakeYDL()
@@ -127,7 +128,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'The Royal Concept EP')
-        self.assertTrue(len(result['entries']) >= 6)
+        assertGreaterEqual(self, len(result['entries']), 6)
 
     def test_soundcloud_user(self):
         dl = FakeYDL()
@@ -135,7 +136,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('https://soundcloud.com/the-concept-band')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '9615865')
-        self.assertTrue(len(result['entries']) >= 12)
+        assertGreaterEqual(self, len(result['entries']), 12)
 
     def test_soundcloud_likes(self):
         dl = FakeYDL()
@@ -143,7 +144,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('https://soundcloud.com/the-concept-band/likes')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '9615865')
-        self.assertTrue(len(result['entries']) >= 1)
+        assertGreaterEqual(self, len(result['entries']), 1)
 
     def test_soundcloud_playlist(self):
         dl = FakeYDL()
@@ -153,7 +154,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], '4110309')
         self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
         assertRegexpMatches(
-            self, result['description'], r'TILT Brass - Bowery Poetry Club')
+            self, result['description'], r'.*?TILT Brass - Bowery Poetry Club')
         self.assertEqual(len(result['entries']), 6)
 
     def test_livestream_event(self):
@@ -162,7 +163,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://new.livestream.com/tedx/cityenglish')
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'TEDCity2.0 (English)')
-        self.assertTrue(len(result['entries']) >= 4)
+        assertGreaterEqual(self, len(result['entries']), 4)
 
     def test_livestreamoriginal_folder(self):
         dl = FakeYDL()
@@ -170,7 +171,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
-        self.assertTrue(len(result['entries']) >= 28)
+        assertGreaterEqual(self, len(result['entries']), 28)
 
     def test_nhl_videocenter(self):
         dl = FakeYDL()
@@ -187,15 +188,15 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://bambuser.com/channel/pixelversity')
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'pixelversity')
-        self.assertTrue(len(result['entries']) >= 60)
+        assertGreaterEqual(self, len(result['entries']), 60)
 
     def test_bandcamp_album(self):
         dl = FakeYDL()
         ie = BandcampAlbumIE(dl)
-        result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
+        result = ie.extract('http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'Nightmare Night EP')
-        self.assertTrue(len(result['entries']) >= 4)
+        self.assertEqual(result['title'], 'Hierophany of the Open Grave')
+        assertGreaterEqual(self, len(result['entries']), 9)
         
     def test_smotri_community(self):
         dl = FakeYDL()
@@ -204,7 +205,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'kommuna')
         self.assertEqual(result['title'], 'КПРФ')
-        self.assertTrue(len(result['entries']) >= 4)
+        assertGreaterEqual(self, len(result['entries']), 4)
         
     def test_smotri_user(self):
         dl = FakeYDL()
@@ -213,7 +214,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'inspector')
         self.assertEqual(result['title'], 'Inspector')
-        self.assertTrue(len(result['entries']) >= 9)
+        assertGreaterEqual(self, len(result['entries']), 9)
 
     def test_AcademicEarthCourse(self):
         dl = FakeYDL()
@@ -232,7 +233,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'dvoe_iz_lartsa')
         self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
-        self.assertTrue(len(result['entries']) >= 24)
+        assertGreaterEqual(self, len(result['entries']), 24)
 
     def test_ivi_compilation_season(self):
         dl = FakeYDL()
@@ -241,7 +242,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
         self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
-        self.assertTrue(len(result['entries']) >= 12)
+        assertGreaterEqual(self, len(result['entries']), 12)
         
     def test_imdb_list(self):
         dl = FakeYDL()
@@ -260,7 +261,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], 'cryptography')
         self.assertEqual(result['title'], 'Journey into cryptography')
         self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
-        self.assertTrue(len(result['entries']) >= 3)
+        assertGreaterEqual(self, len(result['entries']), 3)
 
     def test_EveryonesMixtape(self):
         dl = FakeYDL()
@@ -277,7 +278,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://rutube.ru/tags/video/1800/')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '1800')
-        self.assertTrue(len(result['entries']) >= 68)
+        assertGreaterEqual(self, len(result['entries']), 68)
 
     def test_rutube_person(self):
         dl = FakeYDL()
@@ -285,7 +286,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://rutube.ru/video/person/313878/')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '313878')
-        self.assertTrue(len(result['entries']) >= 37)
+        assertGreaterEqual(self, len(result['entries']), 37)
 
     def test_multiple_brightcove_videos(self):
         # https://github.com/rg3/youtube-dl/issues/2283
@@ -322,7 +323,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], '10')
         self.assertEqual(result['title'], 'Who are the hackers?')
-        self.assertTrue(len(result['entries']) >= 6)
+        assertGreaterEqual(self, len(result['entries']), 6)
 
     def test_toypics_user(self):
         dl = FakeYDL()
@@ -330,7 +331,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://videos.toypics.net/Mikey')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'Mikey')
-        self.assertTrue(len(result['entries']) >= 17)
+        assertGreaterEqual(self, len(result['entries']), 17)
 
     def test_xtube_user(self):
         dl = FakeYDL()
@@ -338,7 +339,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'greenshowers')
-        self.assertTrue(len(result['entries']) >= 155)
+        assertGreaterEqual(self, len(result['entries']), 155)
 
     def test_InstagramUser(self):
         dl = FakeYDL()
@@ -346,7 +347,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://instagram.com/porsche')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'porsche')
-        self.assertTrue(len(result['entries']) >= 2)
+        assertGreaterEqual(self, len(result['entries']), 2)
         test_video = next(
             e for e in result['entries']
             if e['id'] == '614605558512799803_462752227')
@@ -385,7 +386,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], '152147')
         self.assertEqual(
             result['title'], 'Brace Yourself - Today\'s Weirdest News')
-        self.assertTrue(len(result['entries']) >= 10)
+        assertGreaterEqual(self, len(result['entries']), 10)
 
     def test_TeacherTubeUser(self):
         dl = FakeYDL()
@@ -393,7 +394,7 @@ class TestPlaylists(unittest.TestCase):
         result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'rbhagwati2')
-        self.assertTrue(len(result['entries']) >= 179)
+        assertGreaterEqual(self, len(result['entries']), 179)
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/test_swfinterp.py b/test/test_swfinterp.py
new file mode 100644 (file)
index 0000000..b42cd74
--- /dev/null
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+import errno
+import io
+import json
+import re
+import subprocess
+
+from youtube_dl.swfinterp import SWFInterpreter
+
+
+TEST_DIR = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), 'swftests')
+
+
+class TestSWFInterpreter(unittest.TestCase):
+    pass
+
+
+def _make_testfunc(testfile):
+    m = re.match(r'^(.*)\.(as)$', testfile)
+    if not m:
+        return
+    test_id = m.group(1)
+
+    def test_func(self):
+        as_file = os.path.join(TEST_DIR, testfile)
+        swf_file = os.path.join(TEST_DIR, test_id + '.swf')
+        if ((not os.path.exists(swf_file))
+                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+            # Recompile
+            try:
+                subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
+            except OSError as ose:
+                if ose.errno == errno.ENOENT:
+                    print('mxmlc not found! Skipping test.')
+                    return
+                raise
+
+        with open(swf_file, 'rb') as swf_f:
+            swf_content = swf_f.read()
+        swfi = SWFInterpreter(swf_content)
+
+        with io.open(as_file, 'r', encoding='utf-8') as as_f:
+            as_content = as_f.read()
+
+        def _find_spec(key):
+            m = re.search(
+                r'(?m)^//\s*%s:\s*(.*?)\n' % re.escape(key), as_content)
+            if not m:
+                raise ValueError('Cannot find %s in %s' % (key, testfile))
+            return json.loads(m.group(1))
+
+        input_args = _find_spec('input')
+        output = _find_spec('output')
+
+        swf_class = swfi.extract_class(test_id)
+        func = swfi.extract_function(swf_class, 'main')
+        res = func(input_args)
+        self.assertEqual(res, output)
+
+    test_func.__name__ = str('test_swf_' + test_id)
+    setattr(TestSWFInterpreter, test_func.__name__, test_func)
+
+
+for testfile in os.listdir(TEST_DIR):
+    _make_testfunc(testfile)
+
+if __name__ == '__main__':
+    unittest.main()
index d95533959481df9b458f56c14d4857d3c5230252..604e76ab60ba42081c3b4779e77e2963038f43e5 100644 (file)
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys
@@ -16,35 +18,65 @@ from youtube_dl.utils import compat_str, compat_urlretrieve
 
 _TESTS = [
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
+        'js',
         86,
-        u'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
+        '>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
+        'js',
         85,
-        u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
+        '3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
-        u'js',
+        'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
+        'js',
         90,
-        u']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
+        ']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
+        'js',
+        84,
+        'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
+        'js',
+        '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
+        'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
-        u'js',
+        'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
+        'swf',
+        86,
+        'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
+    ),
+    (
+        'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
+        'swf',
+        'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
+        '9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
+        'js',
         84,
-        u'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
+        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
     ),
     (
-        u'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
-        u'js',
-        u'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
-        u'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
+        'js',
+        83,
+        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
     ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
+        'js',
+        '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
+        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+    )
 ]
 
 
@@ -57,12 +89,12 @@ class TestSignature(unittest.TestCase):
 
 
 def make_tfunc(url, stype, sig_input, expected_sig):
-    basename = url.rpartition('/')[2]
-    m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
-    assert m, '%r should follow URL format' % basename
+    m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
+    assert m, '%r should follow URL format' % url
     test_id = m.group(1)
 
     def test_func(self):
+        basename = 'player-%s.%s' % (test_id, stype)
         fn = os.path.join(self.TESTDATA_DIR, basename)
 
         if not os.path.exists(fn):
index 73304fea2d5a7b72201c315456252900fdb584c9..bb583eaecbe95b7b66fe932698c50e9a882e91e8 100755 (executable)
Binary files a/youtube-dl and b/youtube-dl differ
index 26833e8f082276b9a1f8f0de3fbdc4ccf49242d6..59edc71375e6dbfcc458b0028ea5dc1a6991fb1d 100644 (file)
@@ -29,12 +29,6 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ playlist\ or\ the\ command\ line)\ if\ an\ error
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ occurs
 \-\-dump\-user\-agent\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ display\ the\ current\ browser\ identification
-\-\-user\-agent\ UA\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ user\ agent
-\-\-referer\ REF\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ referer,\ use\ if\ the\ video
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ access\ is\ restricted\ to\ one\ domain
-\-\-add\-header\ FIELD:VALUE\ \ \ \ \ \ \ \ \ specify\ a\ custom\ HTTP\ header\ and\ its\ value,
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ separated\ by\ a\ colon\ \[aq]:\[aq].\ You\ can\ use\ this
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ option\ multiple\ times
 \-\-list\-extractors\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ List\ all\ supported\ extractors\ and\ the\ URLs
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ they\ would\ handle
 \-\-extractor\-descriptions\ \ \ \ \ \ \ \ \ Output\ descriptions\ of\ all\ supported
@@ -42,35 +36,22 @@ redistribute it or use it however you like.
 \-\-proxy\ URL\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Use\ the\ specified\ HTTP/HTTPS\ proxy.\ Pass\ in
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ an\ empty\ string\ (\-\-proxy\ "")\ for\ direct
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ connection
-\-\-no\-check\-certificate\ \ \ \ \ \ \ \ \ \ \ Suppress\ HTTPS\ certificate\ validation.
-\-\-prefer\-insecure\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Use\ an\ unencrypted\ connection\ to\ retrieve
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ information\ about\ the\ video.\ (Currently
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ supported\ only\ for\ YouTube)
-\-\-cache\-dir\ DIR\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Location\ in\ the\ filesystem\ where\ youtube\-dl
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ can\ store\ some\ downloaded\ information
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ permanently.\ By\ default\ $XDG_CACHE_HOME
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /youtube\-dl\ or\ ~/.cache/youtube\-dl\ .\ At\ the
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ moment,\ only\ YouTube\ player\ files\ (for
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ videos\ with\ obfuscated\ signatures)\ are
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ cached,\ but\ that\ may\ change.
-\-\-no\-cache\-dir\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Disable\ filesystem\ caching
 \-\-socket\-timeout\ None\ \ \ \ \ \ \ \ \ \ \ \ Time\ to\ wait\ before\ giving\ up,\ in\ seconds
-\-\-bidi\-workaround\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Work\ around\ terminals\ that\ lack
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ bidirectional\ text\ support.\ Requires\ bidiv
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ or\ fribidi\ executable\ in\ PATH
 \-\-default\-search\ PREFIX\ \ \ \ \ \ \ \ \ \ Use\ this\ prefix\ for\ unqualified\ URLs.\ For
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ example\ "gvsearch2:"\ downloads\ two\ videos
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ from\ google\ videos\ for\ \ youtube\-dl\ "large
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ apple".\ Use\ the\ value\ "auto"\ to\ let
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ youtube\-dl\ guess.\ The\ default\ value\ "error"
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ just\ throws\ an\ error.
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ youtube\-dl\ guess\ ("auto_warning"\ to\ emit\ a
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ warning\ when\ guessing).\ "error"\ just\ throws
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ an\ error.\ The\ default\ value\ "fixup_error"
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ repairs\ broken\ URLs,\ but\ emits\ an\ error\ if
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ this\ is\ not\ possible\ instead\ of\ searching.
 \-\-ignore\-config\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Do\ not\ read\ configuration\ files.\ When\ given
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ in\ the\ global\ configuration\ file\ /etc
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /youtube\-dl.conf:\ do\ not\ read\ the\ user
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ configuration\ in\ ~/.config/youtube\-dl.conf
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (%APPDATA%/youtube\-dl/config.txt\ on
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Windows)
-\-\-encoding\ ENCODING\ \ \ \ \ \ \ \ \ \ \ \ \ \ Force\ the\ specified\ encoding\ (experimental)
 \f[]
 .fi
 .SS Video Selection:
@@ -128,9 +109,9 @@ redistribute it or use it however you like.
 .IP
 .nf
 \f[C]
-\-t,\ \-\-title\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ title\ in\ file\ name\ (default)
+\-a,\ \-\-batch\-file\ FILE\ \ \ \ \ \ \ \ \ \ \ \ file\ containing\ URLs\ to\ download\ (\[aq]\-\[aq]\ for
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ stdin)
 \-\-id\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ only\ video\ ID\ in\ file\ name
-\-l,\ \-\-literal\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ [deprecated]\ alias\ of\ \-\-title
 \-A,\ \-\-auto\-number\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ number\ downloaded\ files\ starting\ from\ 00000
 \-o,\ \-\-output\ TEMPLATE\ \ \ \ \ \ \ \ \ \ \ \ output\ filename\ template.\ Use\ %(title)s\ to
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ get\ the\ title,\ %(uploader)s\ for\ the
@@ -163,18 +144,15 @@ redistribute it or use it however you like.
 \-\-restrict\-filenames\ \ \ \ \ \ \ \ \ \ \ \ \ Restrict\ filenames\ to\ only\ ASCII
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ characters,\ and\ avoid\ "&"\ and\ spaces\ in
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ filenames
-\-a,\ \-\-batch\-file\ FILE\ \ \ \ \ \ \ \ \ \ \ \ file\ containing\ URLs\ to\ download\ (\[aq]\-\[aq]\ for
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ stdin)
-\-\-load\-info\ FILE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ json\ file\ containing\ the\ video\ information
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (created\ with\ the\ "\-\-write\-json"\ option)
+\-t,\ \-\-title\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ [deprecated]\ use\ title\ in\ file\ name
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (default)
+\-l,\ \-\-literal\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ [deprecated]\ alias\ of\ \-\-title
 \-w,\ \-\-no\-overwrites\ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ overwrite\ files
 \-c,\ \-\-continue\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ force\ resume\ of\ partially\ downloaded\ files.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ By\ default,\ youtube\-dl\ will\ resume
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ downloads\ if\ possible.
 \-\-no\-continue\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ resume\ partially\ downloaded\ files
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (restart\ from\ beginning)
-\-\-cookies\ FILE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ file\ to\ read\ cookies\ from\ and\ dump\ cookie
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ jar\ in
 \-\-no\-part\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ use\ .part\ files
 \-\-no\-mtime\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ use\ the\ Last\-modified\ header\ to\ set
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ file\ modification\ time
@@ -184,6 +162,19 @@ redistribute it or use it however you like.
 \-\-write\-annotations\ \ \ \ \ \ \ \ \ \ \ \ \ \ write\ video\ annotations\ to\ a\ .annotation
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ file
 \-\-write\-thumbnail\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ write\ thumbnail\ image\ to\ disk
+\-\-load\-info\ FILE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ json\ file\ containing\ the\ video\ information
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (created\ with\ the\ "\-\-write\-json"\ option)
+\-\-cookies\ FILE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ file\ to\ read\ cookies\ from\ and\ dump\ cookie
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ jar\ in
+\-\-cache\-dir\ DIR\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Location\ in\ the\ filesystem\ where\ youtube\-dl
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ can\ store\ some\ downloaded\ information
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ permanently.\ By\ default\ $XDG_CACHE_HOME
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /youtube\-dl\ or\ ~/.cache/youtube\-dl\ .\ At\ the
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ moment,\ only\ YouTube\ player\ files\ (for
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ videos\ with\ obfuscated\ signatures)\ are
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ cached,\ but\ that\ may\ change.
+\-\-no\-cache\-dir\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Disable\ filesystem\ caching
+\-\-rm\-cache\-dir\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Delete\ all\ filesystem\ cache\ files
 \f[]
 .fi
 .SS Verbosity / Simulation Options:
@@ -218,6 +209,26 @@ redistribute it or use it however you like.
 \-\-print\-traffic\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Display\ sent\ and\ read\ HTTP\ traffic
 \f[]
 .fi
+.SS Workarounds:
+.IP
+.nf
+\f[C]
+\-\-encoding\ ENCODING\ \ \ \ \ \ \ \ \ \ \ \ \ \ Force\ the\ specified\ encoding\ (experimental)
+\-\-no\-check\-certificate\ \ \ \ \ \ \ \ \ \ \ Suppress\ HTTPS\ certificate\ validation.
+\-\-prefer\-insecure\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Use\ an\ unencrypted\ connection\ to\ retrieve
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ information\ about\ the\ video.\ (Currently
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ supported\ only\ for\ YouTube)
+\-\-user\-agent\ UA\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ user\ agent
+\-\-referer\ REF\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ referer,\ use\ if\ the\ video
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ access\ is\ restricted\ to\ one\ domain
+\-\-add\-header\ FIELD:VALUE\ \ \ \ \ \ \ \ \ specify\ a\ custom\ HTTP\ header\ and\ its\ value,
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ separated\ by\ a\ colon\ \[aq]:\[aq].\ You\ can\ use\ this
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ option\ multiple\ times
+\-\-bidi\-workaround\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Work\ around\ terminals\ that\ lack
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ bidirectional\ text\ support.\ Requires\ bidiv
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ or\ fribidi\ executable\ in\ PATH
+\f[]
+.fi
 .SS Video Format Options:
 .IP
 .nf
index 498e841dc762910ea6a1fcb73daa104f5d5af70d..4077a163d14cf275e9e1fdf6b74c5cb72fc5f54e 100644 (file)
@@ -4,7 +4,7 @@ __youtube_dl()
     COMPREPLY=()
     cur="${COMP_WORDS[COMP_CWORD]}"
     prev="${COMP_WORDS[COMP_CWORD-1]}"
-    opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --user-agent --referer --add-header --list-extractors --extractor-descriptions --proxy --no-check-certificate --prefer-insecure --cache-dir --no-cache-dir --socket-timeout --bidi-workaround --default-search --ignore-config --encoding --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --include-ads --youtube-include-dash-manifest --rate-limit --retries --buffer-size --no-resize-buffer --test --title --id --literal --auto-number --output --autonumber-size --restrict-filenames --batch-file --load-info --no-overwrites --continue --no-continue --cookies --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --format --all-formats --prefer-free-formats --max-quality --list-formats --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg"
+    opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --proxy --socket-timeout --default-search --ignore-config --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --include-ads --youtube-include-dash-manifest --rate-limit --retries --buffer-size --no-resize-buffer --test --batch-file --id --auto-number --output --autonumber-size --restrict-filenames --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --format --all-formats --prefer-free-formats --max-quality --list-formats --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg"
     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
     fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
     diropts="--cache-dir"
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
deleted file mode 100644 (file)
index 5c8e676..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# Legacy file for backwards compatibility, use youtube_dl.downloader instead!
-from .downloader import FileDownloader as RealFileDownloader
-from .downloader import get_suitable_downloader
-
-
-# This class reproduces the old behaviour of FileDownloader
-class FileDownloader(RealFileDownloader):
-    def _do_download(self, filename, info_dict):
-        real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
-        for ph in self._progress_hooks:
-            real_fd.add_progress_hook(ph)
-        return real_fd.download(filename, info_dict)
index 3dff723b81fff6947ac8cf08c62a275843f359f9..14a1d06ab1ed3350547822cac71501745a14842a 100755 (executable)
@@ -275,7 +275,7 @@ class YoutubeDL(object):
             return message
 
         assert hasattr(self, '_output_process')
-        assert type(message) == type('')
+        assert isinstance(message, compat_str)
         line_count = message.count('\n') + 1
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
@@ -303,7 +303,7 @@ class YoutubeDL(object):
 
     def to_stderr(self, message):
         """Print message to stderr."""
-        assert type(message) == type('')
+        assert isinstance(message, compat_str)
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
@@ -849,7 +849,7 @@ class YoutubeDL(object):
         # Keep for backwards compatibility
         info_dict['stitle'] = info_dict['title']
 
-        if not 'format' in info_dict:
+        if 'format' not in info_dict:
             info_dict['format'] = info_dict['ext']
 
         reason = self._match_entry(info_dict)
@@ -999,7 +999,7 @@ class YoutubeDL(object):
                     if info_dict.get('requested_formats') is not None:
                         downloaded = []
                         success = True
-                        merger = FFmpegMergerPP(self)
+                        merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                         if not merger._get_executable():
                             postprocessors = []
                             self.report_warning('You have requested multiple '
@@ -1197,6 +1197,10 @@ class YoutubeDL(object):
             if res:
                 res += ', '
             res += format_bytes(fdict['filesize'])
+        elif fdict.get('filesize_approx') is not None:
+            if res:
+                res += ', '
+            res += '~' + format_bytes(fdict['filesize_approx'])
         return res
 
     def list_formats(self, info_dict):
@@ -1230,14 +1234,18 @@ class YoutubeDL(object):
         if not self.params.get('verbose'):
             return
 
-        write_string(
+        if type('') is not compat_str:
+            # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
+            self.report_warning(
+                'Your Python is broken! Update to a newer and supported version')
+
+        encoding_str = (
             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                 locale.getpreferredencoding(),
                 sys.getfilesystemencoding(),
                 sys.stdout.encoding,
-                self.get_encoding()),
-            encoding=None
-        )
+                self.get_encoding()))
+        write_string(encoding_str, encoding=None)
 
         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
         try:
index 5e16a549177255a7bca62292d3ca87233835269c..f9c16e426d1daa207ecec420a47a8d4692de00f6 100644 (file)
@@ -64,18 +64,20 @@ __authors__  = (
     'Adam Malcontenti-Wilson',
     'Tobias Bell',
     'Naglis Jonaitis',
+    'Charles Chen',
+    'Hassaan Ali',
+    'Dobrosław Żybort',
 )
 
 __license__ = 'Public Domain'
 
 import codecs
 import io
-import locale
 import optparse
 import os
 import random
-import re
 import shlex
+import shutil
 import sys
 
 
@@ -97,7 +99,7 @@ from .utils import (
     write_string,
 )
 from .update import update_self
-from .FileDownloader import (
+from .downloader import (
     FileDownloader,
 )
 from .extractor import gen_extractors
@@ -222,6 +224,7 @@ def parseOpts(overrideArguments=None):
     downloader     = optparse.OptionGroup(parser, 'Download Options')
     postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
     filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
+    workarounds    = optparse.OptionGroup(parser, 'Workarounds')
     verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
 
     general.add_option('-h', '--help',
@@ -238,14 +241,6 @@ def parseOpts(overrideArguments=None):
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
-    general.add_option('--user-agent',
-            dest='user_agent', help='specify a custom user agent', metavar='UA')
-    general.add_option('--referer',
-            dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
-            metavar='REF', default=None)
-    general.add_option('--add-header',
-            dest='headers', help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times', action="append",
-            metavar='FIELD:VALUE')
     general.add_option('--list-extractors',
             action='store_true', dest='list_extractors',
             help='List all supported extractors and the URLs they would handle', default=False)
@@ -255,33 +250,17 @@ def parseOpts(overrideArguments=None):
     general.add_option(
         '--proxy', dest='proxy', default=None, metavar='URL',
         help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
-    general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
-    general.add_option(
-        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
-        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
-    general.add_option(
-        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
-        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
-    general.add_option(
-        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
-        help='Disable filesystem caching')
     general.add_option(
         '--socket-timeout', dest='socket_timeout',
         type=float, default=None, help=u'Time to wait before giving up, in seconds')
-    general.add_option(
-        '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
     general.add_option(
         '--default-search',
         dest='default_search', metavar='PREFIX',
-        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for  youtube-dl "large apple". Use the value "auto" to let youtube-dl guess. The default value "error" just throws an error.')
+        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for  youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
     general.add_option(
         '--ignore-config',
         action='store_true',
         help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
-    general.add_option(
-        '--encoding', dest='encoding', metavar='ENCODING',
-        help='Force the specified encoding (experimental)')
 
     selection.add_option(
         '--playlist-start',
@@ -382,6 +361,33 @@ def parseOpts(overrideArguments=None):
             help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
     downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
 
+    workarounds.add_option(
+        '--encoding', dest='encoding', metavar='ENCODING',
+        help='Force the specified encoding (experimental)')
+    workarounds.add_option(
+        '--no-check-certificate', action='store_true',
+        dest='no_check_certificate', default=False,
+        help='Suppress HTTPS certificate validation.')
+    workarounds.add_option(
+        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
+        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
+    workarounds.add_option(
+        '--user-agent', metavar='UA',
+        dest='user_agent', help='specify a custom user agent')
+    workarounds.add_option(
+        '--referer', metavar='REF',
+        dest='referer', default=None,
+        help='specify a custom referer, use if the video access is restricted to one domain',
+    )
+    workarounds.add_option(
+        '--add-header', metavar='FIELD:VALUE',
+        dest='headers', action='append',
+        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
+    )
+    workarounds.add_option(
+        '--bidi-workaround', dest='bidi_workaround', action='store_true',
+        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+
     verbosity.add_option('-q', '--quiet',
             action='store_true', dest='quiet', help='activates quiet mode', default=False)
     verbosity.add_option(
@@ -439,12 +445,10 @@ def parseOpts(overrideArguments=None):
             help='Display sent and read HTTP traffic')
 
 
-    filesystem.add_option('-t', '--title',
-            action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
+    filesystem.add_option('-a', '--batch-file',
+            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--id',
             action='store_true', dest='useid', help='use only video ID in file name', default=False)
-    filesystem.add_option('-l', '--literal',
-            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
     filesystem.add_option('-A', '--auto-number',
             action='store_true', dest='autonumber',
             help='number downloaded files starting from 00000', default=False)
@@ -470,11 +474,10 @@ def parseOpts(overrideArguments=None):
     filesystem.add_option('--restrict-filenames',
             action='store_true', dest='restrictfilenames',
             help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
-    filesystem.add_option('-a', '--batch-file',
-            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
-    filesystem.add_option('--load-info',
-            dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option)')
+    filesystem.add_option('-t', '--title',
+            action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
+    filesystem.add_option('-l', '--literal',
+            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
     filesystem.add_option('-w', '--no-overwrites',
             action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
@@ -482,8 +485,6 @@ def parseOpts(overrideArguments=None):
     filesystem.add_option('--no-continue',
             action='store_false', dest='continue_dl',
             help='do not resume partially downloaded files (restart from beginning)')
-    filesystem.add_option('--cookies',
-            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
     filesystem.add_option('--no-part',
             action='store_true', dest='nopart', help='do not use .part files', default=False)
     filesystem.add_option('--no-mtime',
@@ -501,6 +502,20 @@ def parseOpts(overrideArguments=None):
     filesystem.add_option('--write-thumbnail',
             action='store_true', dest='writethumbnail',
             help='write thumbnail image to disk', default=False)
+    filesystem.add_option('--load-info',
+            dest='load_info_filename', metavar='FILE',
+            help='json file containing the video information (created with the "--write-json" option)')
+    filesystem.add_option('--cookies',
+            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+    filesystem.add_option(
+        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
+        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
+    filesystem.add_option(
+        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
+        help='Disable filesystem caching')
+    filesystem.add_option(
+        '--rm-cache-dir', action='store_true', dest='rm_cachedir',
+        help='Delete all filesystem cache files')
 
 
     postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
@@ -534,6 +549,7 @@ def parseOpts(overrideArguments=None):
     parser.add_option_group(downloader)
     parser.add_option_group(filesystem)
     parser.add_option_group(verbosity)
+    parser.add_option_group(workarounds)
     parser.add_option_group(video_format)
     parser.add_option_group(subtitles)
     parser.add_option_group(authentication)
@@ -633,7 +649,7 @@ def _real_main(argv=None):
             if desc is False:
                 continue
             if hasattr(ie, 'SEARCH_KEY'):
-                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise')
+                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
                 _COUNTS = (u'', u'5', u'10', u'all')
                 desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
             compat_print(desc)
@@ -694,7 +710,7 @@ def _real_main(argv=None):
         date = DateRange.day(opts.date)
     else:
         date = DateRange(opts.dateafter, opts.datebefore)
-    if opts.default_search not in ('auto', 'auto_warning', None) and ':' not in opts.default_search:
+    if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
         parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
 
     # Do not download videos when there are audio-only formats
@@ -833,9 +849,26 @@ def _real_main(argv=None):
         if opts.update_self:
             update_self(ydl.to_screen, opts.verbose)
 
+        # Remove cache dir
+        if opts.rm_cachedir:
+            if opts.cachedir is None:
+                ydl.to_screen(u'No cache dir specified (Did you combine --no-cache-dir and --rm-cache-dir?)')
+            else:
+                if ('.cache' not in opts.cachedir) or ('youtube-dl' not in opts.cachedir):
+                    ydl.to_screen(u'Not removing directory %s - this does not look like a cache dir')
+                    retcode = 141
+                else:
+                    ydl.to_screen(
+                        u'Removing cache dir %s .' % opts.cachedir,
+                        skip_eol=True)
+                    if os.path.exists(opts.cachedir):
+                        ydl.to_screen(u'.', skip_eol=True)
+                        shutil.rmtree(opts.cachedir)
+                    ydl.to_screen(u'.')
+
         # Maybe do nothing
         if (len(all_urls) < 1) and (opts.load_info_filename is None):
-            if not opts.update_self:
+            if not (opts.update_self or opts.rm_cachedir):
                 parser.error(u'you must provide at least one URL')
             else:
                 sys.exit()
index e6be6ae6c878c9ede7cd2cf3b6be663e22bb8be1..71353f607daead364acbdad83b18b79e61a5bffa 100644 (file)
@@ -220,6 +220,7 @@ class F4mFD(FileDownloader):
 
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
+        requested_bitrate = info_dict.get('tbr')
         self.to_screen('[download] Downloading f4m manifest')
         manifest = self.ydl.urlopen(man_url).read()
         self.report_destination(filename)
@@ -233,8 +234,14 @@ class F4mFD(FileDownloader):
 
         doc = etree.fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
-        formats = sorted(formats, key=lambda f: f[0])
-        rate, media = formats[-1]
+        if requested_bitrate is None:
+            # get the best format
+            formats = sorted(formats, key=lambda f: f[0])
+            rate, media = formats[-1]
+        else:
+            rate, media = list(filter(
+                lambda f: int(f[0]) == requested_bitrate, formats))[0]
+
         base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
         bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
         metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
index e49ac3e52783608942b34deed950703d2a85dc39..66c873789e1030707e72b63140c01bba75012088 100644 (file)
@@ -1,5 +1,7 @@
+from .abc import ABCIE
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
+from .adultswim import AdultSwimIE
 from .aftonbladet import AftonbladetIE
 from .anitube import AnitubeIE
 from .aol import AolIE
@@ -52,6 +54,7 @@ from .cnn import (
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
+from .cracked import CrackedIE
 from .criterion import CriterionIE
 from .crunchyroll import CrunchyrollIE
 from .cspan import CSpanIE
@@ -62,6 +65,7 @@ from .dailymotion import (
     DailymotionUserIE,
 )
 from .daum import DaumIE
+from .dfb import DFBIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
 from .drtv import DRTVIE
@@ -108,9 +112,11 @@ from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
 from .gameone import GameOneIE
 from .gamespot import GameSpotIE
+from .gamestar import GameStarIE
 from .gametrailers import GametrailersIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
+from .godtube import GodTubeIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .gorillavid import GorillaVidIE
@@ -137,6 +143,7 @@ from .ivi import (
     IviIE,
     IviCompilationIE
 )
+from .izlesene import IzleseneIE
 from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
@@ -148,6 +155,7 @@ from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .kontrtube import KontrTubeIE
+from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .la7 import LA7IE
 from .lifenews import LifeNewsIE
@@ -170,6 +178,7 @@ from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE, OCWMITIE
 from .mixcloud import MixcloudIE
+from .mlb import MLBIE
 from .mpora import MporaIE
 from .mofosex import MofosexIE
 from .mooshare import MooshareIE
@@ -249,10 +258,12 @@ from .rutube import (
     RutubePersonIE,
 )
 from .rutv import RUTVIE
+from .sapo import SapoIE
 from .savefrom import SaveFromIE
 from .scivee import SciVeeIE
 from .screencast import ScreencastIE
 from .servingsys import ServingSysIE
+from .shared import SharedIE
 from .sina import SinaIE
 from .slideshare import SlideshareIE
 from .slutload import SlutloadIE
@@ -262,6 +273,8 @@ from .smotri import (
     SmotriUserIE,
     SmotriBroadcastIE,
 )
+from .snotr import SnotrIE
+from .sockshare import SockshareIE
 from .sohu import SohuIE
 from .soundcloud import (
     SoundcloudIE,
@@ -313,6 +326,8 @@ from .tumblr import TumblrIE
 from .tutv import TutvIE
 from .tvigle import TvigleIE
 from .tvp import TvpIE
+from .tvplay import TVPlayIE
+from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
     UdemyCourseIE
@@ -334,6 +349,7 @@ from .videofyme import VideofyMeIE
 from .videopremium import VideoPremiumIE
 from .videott import VideoTtIE
 from .videoweed import VideoWeedIE
+from .vidme import VidmeIE
 from .vimeo import (
     VimeoIE,
     VimeoChannelIE,
@@ -396,6 +412,7 @@ from .youtube import (
     YoutubeUserIE,
     YoutubeWatchLaterIE,
 )
+
 from .zdf import ZDFIE
 
 
diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py
new file mode 100644 (file)
index 0000000..7d89f44
--- /dev/null
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+
+
+class ABCIE(InfoExtractor):
+    IE_NAME = 'abc.net.au'
+    _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
+        'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
+        'info_dict': {
+            'id': '5624716',
+            'ext': 'mp4',
+            'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
+            'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        urls_info_json = self._search_regex(
+            r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
+            flags=re.DOTALL)
+        urls_info = json.loads(urls_info_json.replace('\'', '"'))
+        formats = [{
+            'url': url_info['url'],
+            'width': int(url_info['width']),
+            'height': int(url_info['height']),
+            'tbr': int(url_info['bitrate']),
+            'filesize': int(url_info['filesize']),
+        } for url_info in urls_info]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._og_search_title(webpage),
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py
new file mode 100644 (file)
index 0000000..a00bfcb
--- /dev/null
@@ -0,0 +1,139 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+class AdultSwimIE(InfoExtractor):
+    _VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
+    _TEST = {
+        'url': 'http://video.adultswim.com/rick-and-morty/close-rick-counters-of-the-rick-kind.html?x=y#title',
+        'playlist': [
+            {
+                'md5': '4da359ec73b58df4575cd01a610ba5dc',
+                'info_dict': {
+                    'id': '8a250ba1450996e901453d7f02ca02f5',
+                    'ext': 'flv',
+                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 1',
+                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
+                    'uploader': 'Rick and Morty',
+                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
+                }
+            },
+            {
+                'md5': 'ffbdf55af9331c509d95350bd0cc1819',
+                'info_dict': {
+                    'id': '8a250ba1450996e901453d7f4bd102f6',
+                    'ext': 'flv',
+                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 2',
+                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
+                    'uploader': 'Rick and Morty',
+                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
+                }
+            },
+            {
+                'md5': 'b92409635540304280b4b6c36bd14a0a',
+                'info_dict': {
+                    'id': '8a250ba1450996e901453d7fa73c02f7',
+                    'ext': 'flv',
+                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 3',
+                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
+                    'uploader': 'Rick and Morty',
+                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
+                }
+            },
+            {
+                'md5': 'e8818891d60e47b29cd89d7b0278156d',
+                'info_dict': {
+                    'id': '8a250ba1450996e901453d7fc8ba02f8',
+                    'ext': 'flv',
+                    'title': 'Rick and Morty Close Rick-Counters of the Rick Kind part 4',
+                    'description': 'Rick has a run in with some old associates, resulting in a fallout with Morty. You got any chips, broh?',
+                    'uploader': 'Rick and Morty',
+                    'thumbnail': 'http://i.cdn.turner.com/asfix/repository/8a250ba13f865824013fc9db8b6b0400/thumbnail_267549017116827057.jpg'
+                }
+            }
+        ]
+    }
+
+    _video_extensions = {
+        '3500': 'flv',
+        '640': 'mp4',
+        '150': 'mp4',
+        'ipad': 'm3u8',
+        'iphone': 'm3u8'
+    }
+    _video_dimensions = {
+        '3500': (1280, 720),
+        '640': (480, 270),
+        '150': (320, 180)
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_path = mobj.group('path')
+
+        webpage = self._download_webpage(url, video_path)
+        episode_id = self._html_search_regex(r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>', webpage, 'episode_id')
+        title = self._og_search_title(webpage)
+
+        index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
+        idoc = self._download_xml(index_url, title, 'Downloading episode index', 'Unable to download episode index')
+
+        episode_el = idoc.find('.//episode')
+        show_title = episode_el.attrib.get('collectionTitle')
+        episode_title = episode_el.attrib.get('title')
+        thumbnail = episode_el.attrib.get('thumbnailUrl')
+        description = episode_el.find('./description').text.strip()
+
+        entries = []
+        segment_els = episode_el.findall('./segments/segment')
+
+        for part_num, segment_el in enumerate(segment_els):
+            segment_id = segment_el.attrib.get('id')
+            segment_title = '%s %s part %d' % (show_title, episode_title, part_num + 1)
+            thumbnail = segment_el.attrib.get('thumbnailUrl')
+            duration = segment_el.attrib.get('duration')
+
+            segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
+            idoc = self._download_xml(segment_url, segment_title, 'Downloading segment information', 'Unable to download segment information')
+
+            formats = []
+            file_els = idoc.findall('.//files/file')
+
+            for file_el in file_els:
+                bitrate = file_el.attrib.get('bitrate')
+                type = file_el.attrib.get('type')
+                width, height = self._video_dimensions.get(bitrate, (None, None))
+                formats.append({
+                    'format_id': '%s-%s' % (bitrate, type),
+                    'url': file_el.text,
+                    'ext': self._video_extensions.get(bitrate, 'mp4'),
+                    # The bitrate may not be a number (for example: 'iphone')
+                    'tbr': int(bitrate) if bitrate.isdigit() else None,
+                    'height': height,
+                    'width': width
+                })
+
+            self._sort_formats(formats)
+
+            entries.append({
+                'id': segment_id,
+                'title': segment_title,
+                'formats': formats,
+                'uploader': show_title,
+                'thumbnail': thumbnail,
+                'duration': duration,
+                'description': description
+            })
+
+        return {
+            '_type': 'playlist',
+            'id': episode_id,
+            'display_id': video_path,
+            'entries': entries,
+            'title': '%s %s' % (show_title, episode_title),
+            'description': description,
+            'thumbnail': thumbnail
+        }
index 34f0cd49bafa104d3e3c175a4bd0ab6bf2494c1c..7bd7978841d06747145feeda56624de84747fcc1 100644 (file)
@@ -32,7 +32,7 @@ class AllocineIE(InfoExtractor):
             'id': '19540403',
             'ext': 'mp4',
             'title': 'Planes 2 Bande-annonce VF',
-            'description': 'md5:c4b1f7bd682a91de6491ada267ec0f4d',
+            'description': 'md5:eeaffe7c2d634525e21159b93acf3b1e',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
@@ -42,7 +42,7 @@ class AllocineIE(InfoExtractor):
             'id': '19544709',
             'ext': 'mp4',
             'title': 'Dragons 2 - Bande annonce finale VF',
-            'description': 'md5:e74a4dc750894bac300ece46c7036490',
+            'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac',
             'thumbnail': 're:http://.*\.jpg',
         },
     }]
index b36a4d46a6dd435883eb911de2e3530604476c07..957bdefcbec7666473530815df9e3eb5dc88e096 100644 (file)
@@ -7,23 +7,34 @@ from .common import InfoExtractor
 from ..utils import (
     determine_ext,
     ExtractorError,
+    qualities,
+    compat_urllib_parse_urlparse,
+    compat_urllib_parse,
 )
 
 
 class ARDIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+    _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
 
-    _TEST = {
-        'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',
-        'file': '19288786.mp4',
-        'md5': '515bf47ce209fb3f5a61b7aad364634c',
+    _TESTS = [{
+        'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
+        'file': '22429276.mp4',
+        'md5': '469751912f1de0816a9fc9df8336476c',
         'info_dict': {
-            'title': 'Edward Snowden im Interview - Held oder Verräter?',
-            'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. Die Sendung vom Sonntagabend.',
-            'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',
+            'title': 'Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?',
+            'description': 'Das Erste Mediathek [ARD]: Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?, Anne Will, Über die Spionage-Affäre diskutieren Clemens Binninger, Katrin Göring-Eckardt, Georg Mascolo, Andrew B. Denison und Constanze Kurz.. Das Video zur Sendung Anne Will am Mittwoch, 16.07.2014',
         },
         'skip': 'Blocked outside of Germany',
-    }
+    }, {
+        'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916',
+        'info_dict': {
+            'id': '22490580',
+            'ext': 'mp4',
+            'title': 'Das Wunder von Wolbeck (Video tgl. ab 20 Uhr)',
+            'description': 'Auf einem restaurierten Hof bei Wolbeck wird der Heilpraktiker Raffael Lembeck eines morgens von seiner Frau Stella tot aufgefunden. Das Opfer war offensichtlich in seiner Praxis zu Fall gekommen und ist dann verblutet, erklärt Prof. Boerne am Tatort.',
+        },
+        'skip': 'Blocked outside of Germany',
+    }]
 
     def _real_extract(self, url):
         # determine video id from url
@@ -35,6 +46,9 @@ class ARDIE(InfoExtractor):
         else:
             video_id = m.group('video_id')
 
+        urlp = compat_urllib_parse_urlparse(url)
+        url = urlp._replace(path=compat_urllib_parse.quote(urlp.path.encode('utf-8'))).geturl()
+
         webpage = self._download_webpage(url, video_id)
 
         title = self._html_search_regex(
@@ -43,40 +57,64 @@ class ARDIE(InfoExtractor):
              r'<h4 class="headline">(.*?)</h4>'],
             webpage, 'title')
         description = self._html_search_meta(
-            'dcterms.abstract', webpage, 'description')
-        thumbnail = self._og_search_thumbnail(webpage)
-
-
-        media_info = self._download_json(
-            'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
-        # The second element of the _mediaArray contains the standard http urls
-        streams = media_info['_mediaArray'][1]['_mediaStreamArray']
-        if not streams:
-            if '"fsk"' in webpage:
-                raise ExtractorError('This video is only available after 20:00')
-
-        formats = []
-
-        for s in streams:
-            if type(s['_stream']) == list:
-                for index, url in enumerate(s['_stream'][::-1]):
-                    quality = s['_quality'] + index
-                    formats.append({
-                        'quality': quality,
-                        'url': url,
-                        'format_id': '%s-%s' % (determine_ext(url), quality)
+            'dcterms.abstract', webpage, 'description', default=None)
+        if description is None:
+            description = self._html_search_meta(
+                'description', webpage, 'meta description')
+
+        # Thumbnail is sometimes not present.
+        # It is in the mobile version, but that seems to use a different URL
+        # structure altogether.
+        thumbnail = self._og_search_thumbnail(webpage, default=None)
+
+        media_streams = re.findall(r'''(?x)
+            mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
+            "([^"]+)"''', webpage)
+
+        if media_streams:
+            QUALITIES = qualities(['lo', 'hi', 'hq'])
+            formats = []
+            for furl in set(media_streams):
+                if furl.endswith('.f4m'):
+                    fid = 'f4m'
+                else:
+                    fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
+                    fid = fid_m.group(1) if fid_m else None
+                formats.append({
+                    'quality': QUALITIES(fid),
+                    'format_id': fid,
+                    'url': furl,
+                })
+        else:  # request JSON file
+            media_info = self._download_json(
+                'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
+            # The second element of the _mediaArray contains the standard http urls
+            streams = media_info['_mediaArray'][1]['_mediaStreamArray']
+            if not streams:
+                if '"fsk"' in webpage:
+                    raise ExtractorError('This video is only available after 20:00')
+
+            formats = []
+            for s in streams:
+                if type(s['_stream']) == list:
+                    for index, url in enumerate(s['_stream'][::-1]):
+                        quality = s['_quality'] + index
+                        formats.append({
+                            'quality': quality,
+                            'url': url,
+                            'format_id': '%s-%s' % (determine_ext(url), quality)
                         })
-                continue
+                    continue
 
-            format = {
-                'quality': s['_quality'],
-                'url': s['_stream'],
-            }
+                format = {
+                    'quality': s['_quality'],
+                    'url': s['_stream'],
+                }
 
-            format['format_id'] = '%s-%s' % (
-                determine_ext(format['url']), format['quality'])
+                format['format_id'] = '%s-%s' % (
+                    determine_ext(format['url']), format['quality'])
 
-            formats.append(format)
+                formats.append(format)
 
         self._sort_formats(formats)
 
index 7d558e262ecea44df6b025f0db716b82d975b314..3e461e715e141b1ff4a294eb01b7657d16f05d4b 100644 (file)
@@ -52,7 +52,7 @@ class BlinkxIE(InfoExtractor):
                     'height': int(m['h']),
                 })
             elif m['type'] == 'original':
-                duration = m['d']
+                duration = float(m['d'])
             elif m['type'] == 'youtube':
                 yt_id = m['link']
                 self.to_screen('Youtube video detected: %s' % yt_id)
index 25fb79e146b18f50962ba506d01560fbd845dbf2..c51a97ce4327cff934216927948587131dedfa80 100644 (file)
@@ -10,7 +10,7 @@ class BloombergIE(InfoExtractor):
 
     _TEST = {
         'url': 'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
-        'md5': '7bf08858ff7c203c870e8a6190e221e5',
+        # The md5 checksum changes
         'info_dict': {
             'id': 'qurhIVlJSB6hzkVi229d8g',
             'ext': 'flv',
@@ -31,8 +31,7 @@ class BloombergIE(InfoExtractor):
         return {
             'id': name.split('-')[-1],
             'title': title,
-            'url': f4m_url,
-            'ext': 'flv',
+            'formats': self._extract_f4m_formats(f4m_url, name),
             'description': self._og_search_description(webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
         }
index 993360714baa6feeb1b276308eca13ba61c0dd4b..86f0c2861e35f296f594a4ac45bbfe74b799d9e0 100644 (file)
@@ -7,12 +7,13 @@ from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_duration,
 )
 
 
 class BRIE(InfoExtractor):
     IE_DESC = 'Bayerischer Rundfunk Mediathek'
-    _VALID_URL = r'https?://(?:www\.)?br\.de/(?:[a-z0-9\-]+/)+(?P<id>[a-z0-9\-]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?br\.de/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
     _BASE_URL = 'http://www.br.de'
 
     _TESTS = [
@@ -22,8 +23,9 @@ class BRIE(InfoExtractor):
             'info_dict': {
                 'id': '25e279aa-1ffd-40fd-9955-5325bd48a53a',
                 'ext': 'mp4',
-                'title': 'Am 1. und 2. August in Oberammergau',
-                'description': 'md5:dfd224e5aa6819bc1fcbb7826a932021',
+                'title': 'Wenn das Traditions-Theater wackelt',
+                'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
+                'duration': 34,
             }
         },
         {
@@ -34,6 +36,7 @@ class BRIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Über den Pass',
                 'description': 'Die Eroberung der Alpen: Über den Pass',
+                'duration': 2588,
             }
         },
         {
@@ -44,6 +47,7 @@ class BRIE(InfoExtractor):
                 'ext': 'aac',
                 'title': '"Keine neuen Schulden im nächsten Jahr"',
                 'description': 'Haushaltsentwurf: "Keine neuen Schulden im nächsten Jahr"',
+                'duration': 64,
             }
         },
         {
@@ -54,6 +58,7 @@ class BRIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Umweltbewusster Häuslebauer',
                 'description': 'Uwe Erdelt: Umweltbewusster Häuslebauer',
+                'duration': 116,
             }
         },
         {
@@ -64,6 +69,7 @@ class BRIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Folge 1 - Metaphysik',
                 'description': 'Kant für Anfänger: Folge 1 - Metaphysik',
+                'duration': 893,
                 'uploader': 'Eva Maria Steimle',
                 'upload_date': '20140117',
             }
@@ -84,6 +90,7 @@ class BRIE(InfoExtractor):
             media = {
                 'id': xml_media.get('externalId'),
                 'title': xml_media.find('title').text,
+                'duration': parse_duration(xml_media.find('duration').text),
                 'formats': self._extract_formats(xml_media.find('assets')),
                 'thumbnails': self._extract_thumbnails(xml_media.find('teaserImage/variants')),
                 'description': ' '.join(xml_media.find('shareTitle').text.splitlines()),
index ac0315853626279c5cdd1257ec16ff659946e622..822f9a7be1e1c9df23ca0e8fc164a883f706cba1 100644 (file)
@@ -1,24 +1,42 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 
 
 class CBSIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/video/(?P<id>[^/]+)/.*'
+    _VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/(?:video|artist)/(?P<id>[^/]+)/.*'
 
-    _TEST = {
-        u'url': u'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
-        u'file': u'4JUVEwq3wUT7.flv',
-        u'info_dict': {
-            u'title': u'Connect Chat feat. Garth Brooks',
-            u'description': u'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
-            u'duration': 1495,
+    _TESTS = [{
+        'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
+        'info_dict': {
+            'id': '4JUVEwq3wUT7',
+            'ext': 'flv',
+            'title': 'Connect Chat feat. Garth Brooks',
+            'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
+            'duration': 1495,
+        },
+        'params': {
+            # rtmp download
+            'skip_download': True,
+        },
+        '_skip': 'Blocked outside the US',
+    }, {
+        'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
+        'info_dict': {
+            'id': 'P9gjWjelt6iP',
+            'ext': 'flv',
+            'title': 'Live on Letterman - St. Vincent',
+            'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
+            'duration': 3221,
         },
-        u'params': {
+        'params': {
             # rtmp download
-            u'skip_download': True,
+            'skip_download': True,
         },
-    }
+        '_skip': 'Blocked outside the US',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -26,5 +44,5 @@ class CBSIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         real_id = self._search_regex(
             r"video\.settings\.pid\s*=\s*'([^']+)';",
-            webpage, u'real video ID')
+            webpage, 'real video ID')
         return self.url_result(u'theplatform:%s' % real_id)
index 02d5ba52713f27412f85989749fde91970e48e36..a62395d4b727ce917f1ea946b63940b3f52b6bdd 100644 (file)
@@ -42,7 +42,7 @@ class ChilloutzoneIE(InfoExtractor):
             'id': '85523671',
             'ext': 'mp4',
             'title': 'The Sunday Times - Icons',
-            'description': 'md5:3e1c0dc6047498d6728dcdaad0891762',
+            'description': 'md5:a5f7ff82e2f7a9ed77473fe666954e84',
             'uploader': 'Us',
             'uploader_id': 'usfilms',
             'upload_date': '20140131'
index a94f42571746f21cc70ba2527e661952caca1c6f..710d5009b71aafe0da901771048b8c0ba68def04 100644 (file)
@@ -43,7 +43,11 @@ class CNETIE(InfoExtractor):
             raise ExtractorError('Cannot find video data')
 
         video_id = vdata['id']
-        title = vdata['headline']
+        title = vdata.get('headline')
+        if title is None:
+            title = vdata.get('title')
+        if title is None:
+            raise ExtractorError('Cannot find title!')
         description = vdata.get('dek')
         thumbnail = vdata.get('image', {}).get('path')
         author = vdata.get('author')
index 8af0abade8c88fea3fa7fc4e7329e10802b43a5a..c81ce5a96f03b539d2f5e98975218fcdd0ed861d 100644 (file)
@@ -14,13 +14,13 @@ from ..utils import (
 
 
 class ComedyCentralIE(MTVServicesInfoExtractor):
-    _VALID_URL = r'''(?x)https?://(?:www\.)?(comedycentral|cc)\.com/
-        (video-clips|episodes|cc-studios|video-collections)
+    _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
+        (video-clips|episodes|cc-studios|video-collections|full-episodes)
         /(?P<title>.*)'''
     _FEED_URL = 'http://comedycentral.com/feeds/mrss/'
 
     _TEST = {
-        'url': 'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
+        'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
         'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
         'info_dict': {
             'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
index e68657314ecde5406ec2d27fef005f899341daf1..342bfb8b3b53bcb76951613002090be8737bbe29 100644 (file)
@@ -18,6 +18,7 @@ from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
+    int_or_none,
     RegexNotFoundError,
     sanitize_filename,
     unescapeHTML,
@@ -69,6 +70,7 @@ class InfoExtractor(object):
                     * vcodec     Name of the video codec in use
                     * container  Name of the container format
                     * filesize   The number of bytes, if known in advance
+                    * filesize_approx  An estimate for the number of bytes
                     * player_url SWF Player URL (used for rtmpdump).
                     * protocol   The protocol that will be used for the actual
                                  download, lower-case.
@@ -300,8 +302,12 @@ class InfoExtractor(object):
     def _download_json(self, url_or_request, video_id,
                        note=u'Downloading JSON metadata',
                        errnote=u'Unable to download JSON metadata',
-                       transform_source=None):
-        json_string = self._download_webpage(url_or_request, video_id, note, errnote)
+                       transform_source=None,
+                       fatal=True):
+        json_string = self._download_webpage(
+            url_or_request, video_id, note, errnote, fatal=fatal)
+        if (not fatal) and json_string is False:
+            return None
         if transform_source:
             json_string = transform_source(json_string)
         try:
@@ -368,7 +374,8 @@ class InfoExtractor(object):
         else:
             for p in pattern:
                 mobj = re.search(p, string, flags)
-                if mobj: break
+                if mobj:
+                    break
 
         if os.name != 'nt' and sys.stderr.isatty():
             _name = u'\033[0;34m%s\033[0m' % name
@@ -468,7 +475,7 @@ class InfoExtractor(object):
             display_name = name
         return self._html_search_regex(
             r'''(?ix)<meta
-                    (?=[^>]+(?:itemprop|name|property)=["\']%s["\'])
+                    (?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
                     [^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
             html, display_name, fatal=fatal, **kwargs)
 
@@ -555,6 +562,7 @@ class InfoExtractor(object):
                 f.get('abr') if f.get('abr') is not None else -1,
                 audio_ext_preference,
                 f.get('filesize') if f.get('filesize') is not None else -1,
+                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                 f.get('format_id'),
             )
         formats.sort(key=_formats_key)
@@ -583,6 +591,24 @@ class InfoExtractor(object):
         self.to_screen(msg)
         time.sleep(timeout)
 
+    def _extract_f4m_formats(self, manifest_url, video_id):
+        manifest = self._download_xml(
+            manifest_url, video_id, 'Downloading f4m manifest',
+            'Unable to download f4m manifest')
+
+        formats = []
+        for media_el in manifest.findall('{http://ns.adobe.com/f4m/1.0}media'):
+            formats.append({
+                'url': manifest_url,
+                'ext': 'flv',
+                'tbr': int_or_none(media_el.attrib.get('bitrate')),
+                'width': int_or_none(media_el.attrib.get('width')),
+                'height': int_or_none(media_el.attrib.get('height')),
+            })
+        self._sort_formats(formats)
+
+        return formats
+
 
 class SearchInfoExtractor(InfoExtractor):
     """
diff --git a/youtube_dl/extractor/cracked.py b/youtube_dl/extractor/cracked.py
new file mode 100644 (file)
index 0000000..74b880f
--- /dev/null
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_iso8601,
+    str_to_int,
+)
+
+
+class CrackedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html'
+    _TEST = {
+        'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html',
+        'md5': '4b29a5eeec292cd5eca6388c7558db9e',
+        'info_dict': {
+            'id': '19006',
+            'ext': 'mp4',
+            'title': '4 Plot Holes You Didn\'t Notice in Your Favorite Movies',
+            'description': 'md5:3b909e752661db86007d10e5ec2df769',
+            'timestamp': 1405659600,
+            'upload_date': '20140718',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            [r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'], webpage, 'video URL')
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+
+        timestamp = self._html_search_regex(r'<time datetime="([^"]+)"', webpage, 'upload date', fatal=False)
+        if timestamp:
+            timestamp = parse_iso8601(timestamp[:-6])
+
+        view_count = str_to_int(self._html_search_regex(
+            r'<span class="views" id="viewCounts">([\d,\.]+) Views</span>', webpage, 'view count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'<span id="commentCounts">([\d,\.]+)</span>', webpage, 'comment count', fatal=False))
+
+        m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url)
+        if m:
+            width = int(m.group('width'))
+            height = int(m.group('height'))
+        else:
+            width = height = None
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'height': height,
+            'width': width,
+        }
\ No newline at end of file
diff --git a/youtube_dl/extractor/dfb.py b/youtube_dl/extractor/dfb.py
new file mode 100644 (file)
index 0000000..cb8e068
--- /dev/null
@@ -0,0 +1,44 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class DFBIE(InfoExtractor):
+    IE_NAME = 'tv.dfb.de'
+    _VALID_URL = r'https?://tv\.dfb\.de/video/[^/]+/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://tv.dfb.de/video/highlights-des-empfangs-in-berlin/9070/',
+        # The md5 is different each time
+        'info_dict': {
+            'id': '9070',
+            'ext': 'flv',
+            'title': 'Highlights des Empfangs in Berlin',
+            'upload_date': '20140716',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        player_info = self._download_xml(
+            'http://tv.dfb.de/server/hd_video.php?play=%s' % video_id,
+            video_id)
+        video_info = player_info.find('video')
+
+        f4m_info = self._download_xml(video_info.find('url').text, video_id)
+        token_el = f4m_info.find('token')
+        manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] + '&hdcore=3.2.0'
+
+        return {
+            'id': video_id,
+            'title': video_info.find('title').text,
+            'url': manifest_url,
+            'ext': 'flv',
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'upload_date': ''.join(video_info.find('time_date').text.split('.')[::-1]),
+        }
index 41208c97691aafc1c2c96ed06d4a326bca8886a6..9f569aa932967910e12b46c0d0269557437d0c79 100644 (file)
@@ -5,24 +5,26 @@ import os.path
 import re
 
 from .common import InfoExtractor
+from ..utils import compat_urllib_parse_unquote
 
 
 class DropboxIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
     _TEST = {
-        'url': 'https://www.dropbox.com/s/0qr9sai2veej4f8/THE_DOCTOR_GAMES.mp4',
-        'md5': '8ae17c51172fb7f93bdd6a214cc8c896',
+        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4',
+        'md5': '8a3d905427a6951ccb9eb292f154530b',
         'info_dict': {
-            'id': '0qr9sai2veej4f8',
+            'id': 'nelirfsxnmcfbfh',
             'ext': 'mp4',
-            'title': 'THE_DOCTOR_GAMES'
+            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        title = os.path.splitext(mobj.group('title'))[0]
+        fn = compat_urllib_parse_unquote(mobj.group('title'))
+        title = os.path.splitext(fn)[0]
         video_url = url + '?dl=1'
 
         return {
index d26145db1cc399e1202ef5ca41ce5a167e4bdaeb..6d73c8a4a32f83975025a0b1ed932fc291176f8a 100644 (file)
@@ -8,7 +8,6 @@ from ..utils import (
     ExtractorError,
     compat_urllib_parse,
     compat_urllib_request,
-    determine_ext,
 )
 
 
index f3e0f38b7200a70c897dd561b45a275cf42f7193..1b0e8e5d59dc23d52d7fb15d7e46e0b1383a7435 100644 (file)
@@ -19,17 +19,35 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
             + video_id, video_id, 'Downloading XML config')
 
         manifest_url = info.find('videos/video/url').text
-        video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8')
-        video_url = video_url.replace('/z/', '/i/')
+        manifest_url = manifest_url.replace('/z/', '/i/')
+
+        if manifest_url.startswith('rtmp'):
+            formats = [{'url': manifest_url, 'ext': 'flv'}]
+        else:
+            formats = []
+            available_formats = self._search_regex(r'/[^,]*,(.*?),k\.mp4', manifest_url, 'available formats')
+            for index, format_descr in enumerate(available_formats.split(',')):
+                format_info = {
+                    'url': manifest_url.replace('manifest.f4m', 'index_%d_av.m3u8' % index),
+                    'ext': 'mp4',
+                }
+                m_resolution = re.search(r'(?P<width>\d+)x(?P<height>\d+)', format_descr)
+                if m_resolution is not None:
+                    format_info.update({
+                        'width': int(m_resolution.group('width')),
+                        'height': int(m_resolution.group('height')),
+                    })
+                formats.append(format_info)
+
         thumbnail_path = info.find('image').text
 
-        return {'id': video_id,
-                'ext': 'flv' if video_url.startswith('rtmp') else 'mp4',
-                'url': video_url,
-                'title': info.find('titre').text,
-                'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
-                'description': info.find('synopsis').text,
-                }
+        return {
+            'id': video_id,
+            'title': info.find('titre').text,
+            'formats': formats,
+            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
+            'description': info.find('synopsis').text,
+        }
 
 
 class PluzzIE(FranceTVBaseInfoExtractor):
@@ -48,7 +66,7 @@ class PluzzIE(FranceTVBaseInfoExtractor):
 
 class FranceTvInfoIE(FranceTVBaseInfoExtractor):
     IE_NAME = 'francetvinfo.fr'
-    _VALID_URL = r'https?://www\.francetvinfo\.fr/.*/(?P<title>.+)\.html'
+    _VALID_URL = r'https?://(?:www|mobile)\.francetvinfo\.fr/.*/(?P<title>.+)\.html'
 
     _TESTS = [{
         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
@@ -211,7 +229,7 @@ class GenerationQuoiIE(InfoExtractor):
 
 class CultureboxIE(FranceTVBaseInfoExtractor):
     IE_NAME = 'culturebox.francetvinfo.fr'
-    _VALID_URL = r'https?://culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
+    _VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
 
     _TEST = {
         'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
index 6e6b6666003d0837bffd6c25ddd12fe1ce892e50..721e5fce011e113bf8c413543df496fc3eeca17d 100644 (file)
@@ -26,7 +26,7 @@ class FunnyOrDieIE(InfoExtractor):
             'id': 'e402820827',
             'ext': 'mp4',
             'title': 'Please Use This Song (Jon Lajoie)',
-            'description': 'md5:2ed27d364f5a805a6dba199faaf6681d',
+            'description': 'Please use this to sell something.  www.jonlajoie.com',
             'thumbnail': 're:^http:.*\.jpg$',
         },
     }]
diff --git a/youtube_dl/extractor/gamestar.py b/youtube_dl/extractor/gamestar.py
new file mode 100644 (file)
index 0000000..50f8fc7
--- /dev/null
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    str_to_int,
+    unified_strdate,
+)
+
+
+class GameStarIE(InfoExtractor):
+    _VALID_URL = r'http://www\.gamestar\.de/videos/.*,(?P<id>[0-9]+)\.html'
+    _TEST = {
+        'url': 'http://www.gamestar.de/videos/trailer,3/hobbit-3-die-schlacht-der-fuenf-heere,76110.html',
+        'md5': '96974ecbb7fd8d0d20fca5a00810cea7',
+        'info_dict': {
+            'id': '76110',
+            'ext': 'mp4',
+            'title': 'Hobbit 3: Die Schlacht der Fünf Heere - Teaser-Trailer zum dritten Teil',
+            'description': 'Der Teaser-Trailer zu Hobbit 3: Die Schlacht der Fünf Heere zeigt einige Szenen aus dem dritten Teil der Saga und kündigt den vollständigen Trailer an.',
+            'thumbnail': 'http://images.gamestar.de/images/idgwpgsgp/bdb/2494525/600x.jpg',
+            'upload_date': '20140728',
+            'duration': 17
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        og_title = self._og_search_title(webpage)
+        title = og_title.replace(' - Video bei GameStar.de', '').strip()
+
+        url = 'http://gamestar.de/_misc/videos/portal/getVideoUrl.cfm?premium=0&videoId=' + video_id
+
+        description = self._og_search_description(webpage).strip()
+
+        thumbnail = self._proto_relative_url(
+            self._og_search_thumbnail(webpage), scheme='http:')
+
+        upload_date = unified_strdate(self._html_search_regex(
+            r'<span style="float:left;font-size:11px;">Datum: ([0-9]+\.[0-9]+\.[0-9]+)&nbsp;&nbsp;',
+            webpage, 'upload_date', fatal=False))
+
+        duration = parse_duration(self._html_search_regex(
+            r'&nbsp;&nbsp;Länge: ([0-9]+:[0-9]+)</span>', webpage, 'duration',
+            fatal=False))
+
+        view_count = str_to_int(self._html_search_regex(
+            r'&nbsp;&nbsp;Zuschauer: ([0-9\.]+)&nbsp;&nbsp;', webpage,
+            'view_count', fatal=False))
+
+        comment_count = int_or_none(self._html_search_regex(
+            r'>Kommentieren \(([0-9]+)\)</a>', webpage, 'comment_count',
+            fatal=False))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': url,
+            'ext': 'mp4',
+            'thumbnail': thumbnail,
+            'description': description,
+            'upload_date': upload_date,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count
+        }
index 89d5994eef02623222b73fa04e0287a264b01333..de14ae1fb1edd0600488b8f04c7b400bf310ef5a 100644 (file)
@@ -8,6 +8,7 @@ from ..utils import (
     compat_urllib_request,
 )
 
+
 class GDCVaultIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
     _TESTS = [
@@ -31,6 +32,15 @@ class GDCVaultIE(InfoExtractor):
                 'skip_download': True,  # Requires rtmpdump
             }
         },
+        {
+            'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
+            'md5': 'a5eb77996ef82118afbbe8e48731b98e',
+            'info_dict': {
+                'id': '1015301',
+                'ext': 'flv',
+                'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
+            }
+        }
     ]
 
     def _parse_mp4(self, xml_description):
@@ -103,18 +113,40 @@ class GDCVaultIE(InfoExtractor):
         webpage_url = 'http://www.gdcvault.com/play/' + video_id
         start_page = self._download_webpage(webpage_url, video_id)
 
-        xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root', None, False)
+        direct_url = self._search_regex(
+            r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
+            start_page, 'url', default=None)
+        if direct_url:
+            video_url = 'http://www.gdcvault.com/' + direct_url
+            title = self._html_search_regex(
+                r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
+                start_page, 'title')
+
+            return {
+                'id': video_id,
+                'url': video_url,
+                'ext': 'flv',
+                'title': title,
+            }
 
+        xml_root = self._html_search_regex(
+            r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
+            start_page, 'xml root', default=None)
         if xml_root is None:
             # Probably need to authenticate
-            start_page = self._login(webpage_url, video_id)
-            if start_page is None:
+            login_res = self._login(webpage_url, video_id)
+            if login_res is None:
                 self.report_warning('Could not login.')
             else:
+                start_page = login_res
                 # Grab the url from the authenticated page
-                xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root')
+                xml_root = self._html_search_regex(
+                    r'<iframe src="(.*?)player.html.*?".*?</iframe>',
+                    start_page, 'xml root')
 
-        xml_name = self._html_search_regex(r'<iframe src=".*?\?xml=(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', None, False)
+        xml_name = self._html_search_regex(
+            r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
+            start_page, 'xml filename', default=None)
         if xml_name is None:
             # Fallback to the older format
             xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
index f97b59845706b8e33d4438b5cab968a2251b1ad9..bcb0765940df39656be9f78c1ab144976adb5e5e 100644 (file)
@@ -383,13 +383,13 @@ class GenericIE(InfoExtractor):
         if not parsed_url.scheme:
             default_search = self._downloader.params.get('default_search')
             if default_search is None:
-                default_search = 'error'
+                default_search = 'fixup_error'
 
-            if default_search in ('auto', 'auto_warning'):
+            if default_search in ('auto', 'auto_warning', 'fixup_error'):
                 if '/' in url:
                     self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
                     return self.url_result('http://' + url)
-                else:
+                elif default_search != 'fixup_error':
                     if default_search == 'auto_warning':
                         if re.match(r'^(?:url|URL)$', url):
                             raise ExtractorError(
@@ -399,10 +399,11 @@ class GenericIE(InfoExtractor):
                             self._downloader.report_warning(
                                 'Falling back to youtube search for  %s . Set --default-search "auto" to suppress this warning.' % url)
                     return self.url_result('ytsearch:' + url)
-            elif default_search == 'error':
+
+            if default_search in ('error', 'fixup_error'):
                 raise ExtractorError(
                     ('%r is not a valid URL. '
-                     'Set --default-search "ytseach" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
+                     'Set --default-search "ytsearch" (or run  youtube-dl "ytsearch:%s" ) to search YouTube'
                     ) % (url, url), expected=True)
             else:
                 assert ':' in default_search
diff --git a/youtube_dl/extractor/godtube.py b/youtube_dl/extractor/godtube.py
new file mode 100644 (file)
index 0000000..73bd6d8
--- /dev/null
@@ -0,0 +1,58 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    parse_iso8601,
+)
+
+
+class GodTubeIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?godtube\.com/watch/\?v=(?P<id>[\da-zA-Z]+)'
+    _TESTS = [
+        {
+            'url': 'https://www.godtube.com/watch/?v=0C0CNNNU',
+            'md5': '77108c1e4ab58f48031101a1a2119789',
+            'info_dict': {
+                'id': '0C0CNNNU',
+                'ext': 'mp4',
+                'title': 'Woman at the well.',
+                'duration': 159,
+                'timestamp': 1205712000,
+                'uploader': 'beverlybmusic',
+                'upload_date': '20080317',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        config = self._download_xml(
+            'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
+            video_id, 'Downloading player config XML')
+
+        video_url = config.find('.//file').text
+        uploader = config.find('.//author').text
+        timestamp = parse_iso8601(config.find('.//date').text)
+        duration = parse_duration(config.find('.//duration').text)
+        thumbnail = config.find('.//image').text
+
+        media = self._download_xml(
+            'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
+
+        title = media.find('.//title').text
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'duration': duration,
+        }
diff --git a/youtube_dl/extractor/izlesene.py b/youtube_dl/extractor/izlesene.py
new file mode 100644 (file)
index 0000000..79e8430
--- /dev/null
@@ -0,0 +1,97 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    get_element_by_id,
+    parse_iso8601,
+    determine_ext,
+    int_or_none,
+    str_to_int,
+)
+
+
+class IzleseneIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?izlesene\.com/(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)'
+    _STREAM_URL = 'http://panel.izlesene.com/api/streamurl/{id:}/{format:}'
+    _TEST = {
+        'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
+        'md5': '4384f9f0ea65086734b881085ee05ac2',
+        'info_dict': {
+            'id': '7599694',
+            'ext': 'mp4',
+            'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
+            'description': 'Annesi oğluna doğum günü hediyesi olarak minecraft cd si alıyor, ve çocuk hunharca seviniyor',
+            'thumbnail': 're:^http://.*\.jpg',
+            'uploader_id': 'pelikzzle',
+            'timestamp': 1404298698,
+            'upload_date': '20140702',
+            'duration': 95.395,
+            'age_limit': 0,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        url = 'http://www.izlesene.com/video/%s' % video_id
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        uploader = self._html_search_regex(
+            r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False, default='')
+        timestamp = parse_iso8601(self._html_search_meta(
+            'uploadDate', webpage, 'upload date', fatal=False))
+
+        duration = int_or_none(self._html_search_regex(
+            r'"videoduration"\s*:\s*"([^"]+)"', webpage, 'duration', fatal=False))
+        if duration:
+            duration /= 1000.0
+
+        view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
+        comment_count = self._html_search_regex(
+            r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'uploader', fatal=False)
+
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, 'age limit', fatal=False)
+
+        content_url = self._html_search_meta(
+            'contentURL', webpage, 'content URL', fatal=False)
+        ext = determine_ext(content_url, 'mp4')
+
+        # Might be empty for some videos.
+        qualities = self._html_search_regex(
+            r'"quality"\s*:\s*"([^"]+)"', webpage, 'qualities', fatal=False, default='')
+
+        formats = []
+        for quality in qualities.split('|'):
+            json = self._download_json(
+                self._STREAM_URL.format(id=video_id, format=quality), video_id,
+                note='Getting video URL for "%s" quality' % quality,
+                errnote='Failed to get video URL for "%s" quality' % quality
+            )
+            formats.append({
+                'url': json.get('streamurl'),
+                'ext': ext,
+                'format_id': '%sp' % quality if quality else 'sd',
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader_id': uploader,
+            'timestamp': timestamp,
+            'duration': duration,
+            'view_count': int_or_none(view_count),
+            'comment_count': int_or_none(comment_count),
+            'age_limit': 18 if family_friendly == 'False' else 0,
+            'formats': formats,
+        }
index 7083db12ea012720f5dfda7039fdad9e21c12cc9..27017e89f632880c21643c0b58f04d23e963fd39 100644 (file)
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import itertools
 import json
 import os
 import re
@@ -43,10 +44,11 @@ class JustinTVIE(InfoExtractor):
     }
 
     # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id):
-        info_json = self._download_webpage(url, video_id,
-                                           'Downloading video info JSON',
-                                           'unable to download video info JSON')
+    def _parse_page(self, url, video_id, counter):
+        info_json = self._download_webpage(
+            url, video_id,
+            'Downloading video info JSON on page %d' % counter,
+            'Unable to download video info JSON %d' % counter)
 
         response = json.loads(info_json)
         if type(response) != list:
@@ -138,11 +140,10 @@ class JustinTVIE(InfoExtractor):
         entries = []
         offset = 0
         limit = self._JUSTIN_PAGE_LIMIT
-        while True:
-            if paged:
-                self.report_download_page(video_id, offset)
+        for counter in itertools.count(1):
             page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(page_url, video_id)
+            page_count, page_info = self._parse_page(
+                page_url, video_id, counter)
             entries.extend(page_info)
             if not paged or page_count != limit:
                 break
index 961dd1aa6459380c60b1b32e39a2e58dd3cb9a52..56a76380cad6f45cb4a0a33581803f1371b2543b 100644 (file)
@@ -8,7 +8,7 @@ from .common import InfoExtractor
 
 class KickStarterIE(InfoExtractor):
     _VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
-    _TEST = {
+    _TESTS = [{
         'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
         'md5': 'c81addca81327ffa66c642b5d8b08cab',
         'info_dict': {
@@ -18,22 +18,45 @@ class KickStarterIE(InfoExtractor):
             'description': 'A unique motocross documentary that examines the '
                 'life and mind of one of sports most elite athletes: Josh Grant.',
         },
-    }
+    }, {
+        'note': 'Embedded video (not using the native kickstarter video service)',
+        'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
+        'playlist': [
+            {
+                'info_dict': {
+                    'id': '78704821',
+                    'ext': 'mp4',
+                    'uploader_id': 'pebble',
+                    'uploader': 'Pebble Technology',
+                    'title': 'Pebble iOS Notifications',
+                }
+            }
+        ],
+    }]
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
         video_id = m.group('id')
         webpage = self._download_webpage(url, video_id)
 
-        video_url = self._search_regex(r'data-video-url="(.*?)"',
-            webpage, 'video URL')
-        video_title = self._html_search_regex(r'<title>(.*?)</title>',
-            webpage, 'title').rpartition('— Kickstarter')[0].strip()
+        title = self._html_search_regex(
+            r'<title>\s*(.*?)(?:\s*&mdash; Kickstarter)?\s*</title>',
+            webpage, 'title')
+        video_url = self._search_regex(
+            r'data-video-url="(.*?)"',
+            webpage, 'video URL', default=None)
+        if video_url is None:  # No native kickstarter, look for embedded videos
+            return {
+                '_type': 'url_transparent',
+                'ie_key': 'Generic',
+                'url': url,
+                'title': title,
+            }
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': video_title,
+            'title': title,
             'description': self._og_search_description(webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
         }
diff --git a/youtube_dl/extractor/krasview.py b/youtube_dl/extractor/krasview.py
new file mode 100644 (file)
index 0000000..6f3d234
--- /dev/null
@@ -0,0 +1,59 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unescapeHTML,
+)
+
+
+class KrasViewIE(InfoExtractor):
+    IE_DESC = 'Красвью'
+    _VALID_URL = r'https?://krasview\.ru/video/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://krasview.ru/video/512228',
+        'md5': '3b91003cf85fc5db277870c8ebd98eae',
+        'info_dict': {
+            'id': '512228',
+            'ext': 'mp4',
+            'title': 'Снег, лёд, заносы',
+            'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.',
+            'duration': 27,
+            'thumbnail': 're:^https?://.*\.jpg',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        flashvars = json.loads(self._search_regex(
+            r'flashvars\s*:\s*({.+?})\s*}\);', webpage, 'flashvars'))
+
+        video_url = flashvars['url']
+        title = unescapeHTML(flashvars['title'])
+        description = unescapeHTML(flashvars.get('subtitle') or self._og_search_description(webpage, default=None))
+        thumbnail = flashvars['image']
+        duration = int(flashvars['duration'])
+        filesize = int(flashvars['size'])
+        width = int_or_none(self._og_search_property('video:width', webpage, 'video width'))
+        height = int_or_none(self._og_search_property('video:height', webpage, 'video height'))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'filesize': filesize,
+            'width': width,
+            'height': height,
+        }
index 2c100d424650fed5d98330b1e5124df117296c75..c0c2d9b09d92c8ce79894112fd58834f0f1b92a1 100644 (file)
@@ -5,11 +5,14 @@ import json
 
 from .common import InfoExtractor
 from ..utils import (
+    compat_str,
     compat_urllib_parse_urlparse,
     compat_urlparse,
-    xpath_with_ns,
-    compat_str,
+    ExtractorError,
+    find_xpath_attr,
+    int_or_none,
     orderedSet,
+    xpath_with_ns,
 )
 
 
@@ -24,18 +27,82 @@ class LivestreamIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Live from Webster Hall NYC',
             'upload_date': '20121012',
+            'like_count': int,
+            'view_count': int,
+            'thumbnail': 're:^http://.*\.jpg$'
         }
     }
 
+    def _parse_smil(self, video_id, smil_url):
+        formats = []
+        _SWITCH_XPATH = (
+            './/{http://www.w3.org/2001/SMIL20/Language}body/'
+            '{http://www.w3.org/2001/SMIL20/Language}switch')
+        smil_doc = self._download_xml(
+            smil_url, video_id,
+            note='Downloading SMIL information',
+            errnote='Unable to download SMIL information',
+            fatal=False)
+        if smil_doc is False:  # Download failed
+            return formats
+        title_node = find_xpath_attr(
+            smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
+            'name', 'title')
+        if title_node is None:
+            self.report_warning('Cannot find SMIL id')
+            switch_node = smil_doc.find(_SWITCH_XPATH)
+        else:
+            title_id = title_node.attrib['content']
+            switch_node = find_xpath_attr(
+                smil_doc, _SWITCH_XPATH, 'id', title_id)
+        if switch_node is None:
+            raise ExtractorError('Cannot find switch node')
+        video_nodes = switch_node.findall(
+            '{http://www.w3.org/2001/SMIL20/Language}video')
+
+        for vn in video_nodes:
+            tbr = int_or_none(vn.attrib.get('system-bitrate'))
+            furl = (
+                'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
+                (vn.attrib['src']))
+            if 'clipBegin' in vn.attrib:
+                furl += '&ssek=' + vn.attrib['clipBegin']
+            formats.append({
+                'url': furl,
+                'format_id': 'smil_%d' % tbr,
+                'ext': 'flv',
+                'tbr': tbr,
+                'preference': -1000,
+            })
+        return formats
+
     def _extract_video_info(self, video_data):
-        video_url = video_data.get('progressive_url_hd') or video_data.get('progressive_url')
+        video_id = compat_str(video_data['id'])
+
+        FORMAT_KEYS = (
+            ('sd', 'progressive_url'),
+            ('hd', 'progressive_url_hd'),
+        )
+        formats = [{
+            'format_id': format_id,
+            'url': video_data[key],
+            'quality': i + 1,
+        } for i, (format_id, key) in enumerate(FORMAT_KEYS)
+            if video_data.get(key)]
+
+        smil_url = video_data.get('smil_url')
+        if smil_url:
+            formats.extend(self._parse_smil(video_id, smil_url))
+        self._sort_formats(formats)
+
         return {
-            'id': compat_str(video_data['id']),
-            'url': video_url,
-            'ext': 'mp4',
+            'id': video_id,
+            'formats': formats,
             'title': video_data['caption'],
-            'thumbnail': video_data['thumbnail_url'],
+            'thumbnail': video_data.get('thumbnail_url'),
             'upload_date': video_data['updated_at'].replace('-', '')[:8],
+            'like_count': video_data.get('likes', {}).get('total'),
+            'view_count': video_data.get('views'),
         }
 
     def _real_extract(self, url):
@@ -50,7 +117,8 @@ class LivestreamIE(InfoExtractor):
                 r'window.config = ({.*?});', webpage, 'window config')
             info = json.loads(config_json)['event']
             videos = [self._extract_video_info(video_data['data'])
-                for video_data in info['feed']['data'] if video_data['type'] == 'video']
+                for video_data in info['feed']['data']
+                if video_data['type'] == 'video']
             return self.playlist_result(videos, info['id'], info['full_name'])
         else:
             og_video = self._og_search_video_url(webpage, 'player url')
diff --git a/youtube_dl/extractor/mlb.py b/youtube_dl/extractor/mlb.py
new file mode 100644 (file)
index 0000000..37c72bc
--- /dev/null
@@ -0,0 +1,116 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    parse_iso8601,
+    find_xpath_attr,
+)
+
+
+class MLBIE(InfoExtractor):
+    _VALID_URL = r'https?://m\.mlb\.com/(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v(?P<id>n?\d+)'
+    _TESTS = [
+        {
+            'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
+            'md5': 'ff56a598c2cf411a9a38a69709e97079',
+            'info_dict': {
+                'id': '34698933',
+                'ext': 'mp4',
+                'title': "Ackley's spectacular catch",
+                'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
+                'duration': 66,
+                'timestamp': 1405980600,
+                'upload_date': '20140721',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',
+            'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',
+            'info_dict': {
+                'id': '34496663',
+                'ext': 'mp4',
+                'title': 'Stanton prepares for Derby',
+                'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',
+                'duration': 46,
+                'timestamp': 1405105800,
+                'upload_date': '20140711',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',
+            'md5': '0e6e73d509321e142409b695eadd541f',
+            'info_dict': {
+                'id': '34578115',
+                'ext': 'mp4',
+                'title': 'Cespedes repeats as Derby champ',
+                'description': 'md5:08df253ce265d4cf6fb09f581fafad07',
+                'duration': 488,
+                'timestamp': 1405399936,
+                'upload_date': '20140715',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+        {
+            'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',
+            'md5': 'b8fd237347b844365d74ea61d4245967',
+            'info_dict': {
+                'id': '34577915',
+                'ext': 'mp4',
+                'title': 'Bautista on Home Run Derby',
+                'description': 'md5:b80b34031143d0986dddc64a8839f0fb',
+                'duration': 52,
+                'timestamp': 1405390722,
+                'upload_date': '20140715',
+                'thumbnail': 're:^https?://.*\.jpg$',
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        detail = self._download_xml(
+            'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'
+            % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)
+
+        title = detail.find('./headline').text
+        description = detail.find('./big-blurb').text
+        duration = parse_duration(detail.find('./duration').text)
+        timestamp = parse_iso8601(detail.attrib['date'][:-5])
+
+        thumbnail = find_xpath_attr(
+            detail, './thumbnailScenarios/thumbnailScenario', 'type', '45').text
+
+        formats = []
+        for media_url in detail.findall('./url'):
+            playback_scenario = media_url.attrib['playback_scenario']
+            fmt = {
+                'url': media_url.text,
+                'format_id': playback_scenario,
+            }
+            m = re.search(r'(?P<vbr>\d+)K_(?P<width>\d+)X(?P<height>\d+)', playback_scenario)
+            if m:
+                fmt.update({
+                    'vbr': int(m.group('vbr')) * 1000,
+                    'width': int(m.group('width')),
+                    'height': int(m.group('height')),
+                })
+            formats.append(fmt)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'formats': formats,
+            'thumbnail': thumbnail,
+        }
index aa34665d1669f32ab31a02618c58ef9c4b130fe2..d2e4acbada5b99c5c3eac4fe6b966ce77dab1ef9 100644 (file)
@@ -4,7 +4,11 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import find_xpath_attr, compat_str
+from ..utils import (
+    compat_str,
+    ExtractorError,
+    find_xpath_attr,
+)
 
 
 class NBCIE(InfoExtractor):
@@ -85,11 +89,25 @@ class NBCNewsIE(InfoExtractor):
                 flags=re.MULTILINE)
             bootstrap = json.loads(bootstrap_json)
             info = bootstrap['results'][0]['video']
-            playlist_url = info['fallbackPlaylistUrl'] + '?form=MPXNBCNewsAPI'
             mpxid = info['mpxId']
-            all_videos = self._download_json(playlist_url, title)['videos']
-            # The response contains additional videos
-            info = next(v for v in all_videos if v['mpxId'] == mpxid)
+
+            base_urls = [
+                info['fallbackPlaylistUrl'],
+                info['associatedPlaylistUrl'],
+            ]
+
+            for base_url in base_urls:
+                playlist_url = base_url + '?form=MPXNBCNewsAPI'
+                all_videos = self._download_json(playlist_url, title)['videos']
+
+                try:
+                    info = next(v for v in all_videos if v['mpxId'] == mpxid)
+                    break
+                except StopIteration:
+                    continue
+
+            if info is None:
+                raise ExtractorError('Could not find video in playlists')
 
             return {
                 '_type': 'url',
index fbcbe1f40c3c637c205686c09d7131c94f2771e6..12e85a716fec900cf01d72157ab4159bc69ae8f8 100644 (file)
@@ -32,7 +32,7 @@ class NPOIE(InfoExtractor):
             'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
             video_id,
             # We have to remove the javascript callback
-            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//epc', r'\1', j)
+            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j)
         )
         token_page = self._download_webpage(
             'http://ida.omroep.nl/npoplayer/i.js',
index 64cded70789249746a5e2b6604d86563a6ad499c..ec95d070411f97e1dad2fde881c9e5f847caafc8 100644 (file)
@@ -32,13 +32,21 @@ class PBSIE(InfoExtractor):
         },
     }
 
-    def _real_extract(self, url):
+    def _extract_ids(self, url):
         mobj = re.match(self._VALID_URL, url)
 
         presumptive_id = mobj.group('presumptive_id')
         display_id = presumptive_id
         if presumptive_id:
             webpage = self._download_webpage(url, display_id)
+
+            # frontline video embed
+            media_id = self._search_regex(
+                r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'",
+                webpage, 'frontline video ID', fatal=False, default=None)
+            if media_id:
+                return media_id, presumptive_id
+
             url = self._search_regex(
                 r'<iframe\s+id=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
                 webpage, 'player URL')
@@ -57,6 +65,11 @@ class PBSIE(InfoExtractor):
             video_id = mobj.group('id')
             display_id = video_id
 
+        return video_id, display_id
+
+    def _real_extract(self, url):
+        video_id, display_id = self._extract_ids(url)
+
         info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
         info = self._download_json(info_url, display_id)
 
index 4295cf93a75188844bfe0838789e2efcb4363229..d1e12dd8d5a6699ba3caa8d041c8bc039e996fc7 100644 (file)
@@ -35,9 +35,7 @@ class RedTubeIE(InfoExtractor):
             r'<h1 class="videoTitle[^"]*">(.+?)</h1>',
             webpage, u'title')
 
-        video_thumbnail = self._html_search_regex(
-            r'playerInnerHTML.+?<img\s+src="(.+?)"',
-            webpage, u'thumbnail', fatal=False)
+        video_thumbnail = self._og_search_thumbnail(webpage)
 
         # No self-labeling, but they describe themselves as
         # "Home of Videos Porno"
index 205f8a167601f9f7c6a20ccf721439a7b070516f..dce64e1517003015722db1097ac83b106cc91136 100644 (file)
@@ -30,7 +30,7 @@ class RTBFIE(InfoExtractor):
         page = self._download_webpage('https://www.rtbf.be/video/embed?id=%s' % video_id, video_id)
 
         data = json.loads(self._html_search_regex(
-            r'<div class="js-player-embed" data-video="([^"]+)"', page, 'data video'))['data']
+            r'<div class="js-player-embed(?: player-embed)?" data-video="([^"]+)"', page, 'data video'))['data']
 
         video_url = data.get('downloadUrl') or data.get('url')
 
index 4835ec5ecada755a12d7003fed6355adfd6936a6..a45884b251fa355e04c65de554f0e9cbfb5406bb 100644 (file)
@@ -92,16 +92,7 @@ class RTLnowIE(InfoExtractor):
         },
         {
             'url': 'http://www.n-tvnow.de/deluxe-alles-was-spass-macht/thema-ua-luxushotel-fuer-vierbeiner.php?container_id=153819&player=1&season=0',
-            'info_dict': {
-                'id': '153819',
-                'ext': 'flv',
-                'title': 'Deluxe - Alles was Spaß macht - Thema u.a.: Luxushotel für Vierbeiner',
-                'description': 'md5:c3705e1bb32e1a5b2bcd634fc065c631',
-                'thumbnail': 'http://autoimg.static-fra.de/ntvnow/383157/1500x1500/image2.jpg',
-                'upload_date': '20140221',
-                'duration': 2429,
-            },
-            'skip': 'Only works from Germany',
+            'only_matching': True,
         },
     ]
 
index 77fd08ddec09c11d518fc502c509f321ffd525df..c2228b2f0f6a1fc9bba02cddcb5a1740cc85038d 100644 (file)
@@ -17,7 +17,7 @@ class RTVEALaCartaIE(InfoExtractor):
 
     _TEST = {
         'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
-        'md5': '18fcd45965bdd076efdb12cd7f6d7b9e',
+        'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
         'info_dict': {
             'id': '2491869',
             'ext': 'mp4',
diff --git a/youtube_dl/extractor/sapo.py b/youtube_dl/extractor/sapo.py
new file mode 100644 (file)
index 0000000..172cc12
--- /dev/null
@@ -0,0 +1,119 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    unified_strdate,
+)
+
+
+class SapoIE(InfoExtractor):
+    IE_DESC = 'SAPO Vídeos'
+    _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})'
+
+    _TESTS = [
+        {
+            'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi',
+            'md5': '79ee523f6ecb9233ac25075dee0eda83',
+            'note': 'SD video',
+            'info_dict': {
+                'id': 'UBz95kOtiWYUMTA5Ghfi',
+                'ext': 'mp4',
+                'title': 'Benfica - Marcas na Hitória',
+                'description': 'md5:c9082000a128c3fd57bf0299e1367f22',
+                'duration': 264,
+                'uploader': 'tiago_1988',
+                'upload_date': '20080229',
+                'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'],
+            },
+        },
+        {
+            'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF',
+            'md5': '90a2f283cfb49193fe06e861613a72aa',
+            'note': 'HD video',
+            'info_dict': {
+                'id': 'IyusNAZ791ZdoCY5H5IF',
+                'ext': 'mp4',
+                'title': 'Codebits VII - Report',
+                'description': 'md5:6448d6fd81ce86feac05321f354dbdc8',
+                'duration': 144,
+                'uploader': 'codebits',
+                'upload_date': '20140427',
+                'categories': ['codebits', 'codebits2014'],
+            },
+        },
+        {
+            'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz',
+            'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac',
+            'note': 'v2 video',
+            'info_dict': {
+                'id': 'yLqjzPtbTimsn2wWBKHz',
+                'ext': 'mp4',
+                'title': 'Hipnose Condicionativa 4',
+                'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40',
+                'duration': 692,
+                'uploader': 'sapozen',
+                'upload_date': '20090609',
+                'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'],
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        item = self._download_xml(
+            'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item')
+
+        title = item.find('./title').text
+        description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text
+        thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url')
+        duration = parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text)
+        uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text
+        upload_date = unified_strdate(item.find('./pubDate').text)
+        view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text)
+        comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text)
+        tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text
+        categories = tags.split() if tags else []
+        age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0
+
+        video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text
+        video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x')
+
+        formats = [{
+            'url': video_url,
+            'ext': 'mp4',
+            'format_id': 'sd',
+            'width': int(video_size[0]),
+            'height': int(video_size[1]),
+        }]
+
+        if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true':
+            formats.append({
+                'url': re.sub(r'/mov/1$', '/mov/39', video_url),
+                'ext': 'mp4',
+                'format_id': 'hd',
+                'width': 1280,
+                'height': 720,
+            })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'uploader': uploader,
+            'upload_date': upload_date,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'categories': categories,
+            'age_limit': age_limit,
+            'formats': formats,
+        }
index 198a08c1c9ea1032b130903cd1c013f6ed22c31e..5b7367b94119792661506624264b121503cc6858 100644 (file)
@@ -20,7 +20,7 @@ class SaveFromIE(InfoExtractor):
             'upload_date': '20120816',
             'uploader': 'Howcast',
             'uploader_id': 'Howcast',
-            'description': 'md5:4f0aac94361a12e1ce57d74f85265175',
+            'description': 're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*',
         },
         'params': {
             'skip_download': True
diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py
new file mode 100644 (file)
index 0000000..8607482
--- /dev/null
@@ -0,0 +1,57 @@
+from __future__ import unicode_literals
+
+import re
+import base64
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urllib_request,
+    compat_urllib_parse,
+    int_or_none,
+)
+
+
+class SharedIE(InfoExtractor):
+    _VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})'
+
+    _TEST = {
+        'url': 'http://shared.sx/0060718775',
+        'md5': '53e1c58fc3e777ae1dfe9e57ba2f9c72',
+        'info_dict': {
+            'id': '0060718775',
+            'ext': 'mp4',
+            'title': 'Big Buck Bunny Trailer',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        page = self._download_webpage(url, video_id)
+
+        if re.search(r'>File does not exist<', page) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
+        download_form = dict(re.findall(r'<input type="hidden" name="([^"]+)" value="([^"]*)"', page))
+
+        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(download_form))
+        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+
+        video_page = self._download_webpage(request, video_id, 'Downloading video page')
+
+        video_url = self._html_search_regex(r'data-url="([^"]+)"', video_page, 'video URL')
+        title = base64.b64decode(self._html_search_meta('full:title', page, 'title')).decode('utf-8')
+        filesize = int_or_none(self._html_search_meta('full:size', page, 'file size', fatal=False))
+        thumbnail = self._html_search_regex(
+            r'data-poster="([^"]+)"', video_page, 'thumbnail', fatal=False, default=None)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'filesize': filesize,
+            'title': title,
+            'thumbnail': thumbnail,
+        }
\ No newline at end of file
diff --git a/youtube_dl/extractor/snotr.py b/youtube_dl/extractor/snotr.py
new file mode 100644 (file)
index 0000000..da3b05a
--- /dev/null
@@ -0,0 +1,68 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    str_to_int,
+    parse_duration,
+)
+
+
+class SnotrIE(InfoExtractor):
+    _VALID_URL = r'http?://(?:www\.)?snotr\.com/video/(?P<id>\d+)/([\w]+)'
+    _TESTS = [{
+        'url': 'http://www.snotr.com/video/13708/Drone_flying_through_fireworks',
+        'info_dict': {
+            'id': '13708',
+            'ext': 'flv',
+            'title': 'Drone flying through fireworks!',
+            'duration': 247,
+            'filesize_approx': 98566144,
+            'description': 'A drone flying through Fourth of July Fireworks',
+        }
+    }, {
+        'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10',
+        'info_dict': {
+            'id': '530',
+            'ext': 'flv',
+            'title': 'David Letteman - George W. Bush Top 10',
+            'duration': 126,
+            'filesize_approx': 8912896,
+            'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!',
+        }
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._og_search_title(webpage)
+
+        description = self._og_search_description(webpage)
+        video_url = "http://cdn.videos.snotr.com/%s.flv" % video_id
+
+        view_count = str_to_int(self._html_search_regex(
+            r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>',
+            webpage, 'view count', fatal=False))
+
+        duration = parse_duration(self._html_search_regex(
+            r'<p>\n<strong>Length:</strong>\n\s*([0-9:]+).*?</p>',
+            webpage, 'duration', fatal=False))
+
+        filesize_approx = float_or_none(self._html_search_regex(
+            r'<p>\n<strong>Filesize:</strong>\n\s*([0-9.]+)\s*megabyte</p>',
+            webpage, 'filesize', fatal=False), invscale=1024 * 1024)
+
+        return {
+            'id': video_id,
+            'description': description,
+            'title': title,
+            'url': video_url,
+            'view_count': view_count,
+            'duration': duration,
+            'filesize_approx': filesize_approx,
+        }
diff --git a/youtube_dl/extractor/sockshare.py b/youtube_dl/extractor/sockshare.py
new file mode 100644 (file)
index 0000000..dc9f805
--- /dev/null
@@ -0,0 +1,80 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from ..utils import (
+    ExtractorError,
+    compat_urllib_parse,
+    compat_urllib_request,
+    determine_ext,
+)
+import re
+
+from .common import InfoExtractor
+
+
+class SockshareIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sockshare\.com/file/(?P<id>[0-9A-Za-z]+)'
+    _FILE_DELETED_REGEX = r'This file doesn\'t exist, or has been removed\.</div>'
+    _TEST = {
+        'url': 'http://www.sockshare.com/file/437BE28B89D799D7',
+        'md5': '9d0bf1cfb6dbeaa8d562f6c97506c5bd',
+        'info_dict': {
+            'id': '437BE28B89D799D7',
+            'title': 'big_buck_bunny_720p_surround.avi',
+            'ext': 'avi',
+            'thumbnail': 're:^http://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        url = 'http://sockshare.com/file/%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+
+        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id,
+                                 expected=True)
+
+        confirm_hash = self._html_search_regex(r'''(?x)<input\s+
+            type="hidden"\s+
+            value="([^"]*)"\s+
+            name="hash"
+            ''', webpage, 'hash')
+
+        fields = {
+            "hash": confirm_hash,
+            "confirm": "Continue as Free User"
+        }
+
+        post = compat_urllib_parse.urlencode(fields)
+        req = compat_urllib_request.Request(url, post)
+        # Apparently, this header is required for confirmation to work.
+        req.add_header('Host', 'www.sockshare.com')
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+        webpage = self._download_webpage(
+            req, video_id, 'Downloading video page')
+
+        video_url = self._html_search_regex(
+            r'<a href="([^"]*)".+class="download_file_link"',
+            webpage, 'file url')
+        video_url = "http://www.sockshare.com" + video_url
+        title = self._html_search_regex(r'<h1>(.+)<strong>', webpage, 'title')
+        thumbnail = self._html_search_regex(
+            r'<img\s+src="([^"]*)".+?name="bg"',
+            webpage, 'thumbnail')
+
+        formats = [{
+            'format_id': 'sd',
+            'url': video_url,
+            'ext': determine_ext(title),
+        }]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
index 8a77c13709e16f9f65dbd3111dafb1fc9ae8451a..097d0e418d452a968cdf0355419b02c4dd392081 100644 (file)
@@ -82,10 +82,10 @@ class SoundcloudIE(InfoExtractor):
         # downloadable song
         {
             'url': 'https://soundcloud.com/oddsamples/bus-brakes',
-            'md5': 'fee7b8747b09bb755cefd4b853e7249a',
+            'md5': '7624f2351f8a3b2e7cd51522496e7631',
             'info_dict': {
                 'id': '128590877',
-                'ext': 'wav',
+                'ext': 'mp3',
                 'title': 'Bus Brakes',
                 'description': 'md5:0170be75dd395c96025d210d261c784e',
                 'uploader': 'oddsamples',
index af689e2c20411ef4e8ce1badc82a9d24f9a6da31..183dcb03cccb61a2f843d5c1b511050fc4bce75d 100644 (file)
@@ -53,7 +53,7 @@ class SteamIE(InfoExtractor):
             'ext': 'mp4',
             'upload_date': '20140329',
             'title': 'FRONTIERS - Final Greenlight Trailer',
-            'description': 'md5:6df4fe8dd494ae811869672b0767e025',
+            'description': 'md5:dc96a773669d0ca1b36c13c1f30250d9',
             'uploader': 'AAD Productions',
             'uploader_id': 'AtomicAgeDogGames',
         }
index 9faf3a5e3f677ae8b00454c492f6ef2bf129d329..172def221e1277298dc355a2cfdbea3ae4f9fdce 100644 (file)
@@ -1,4 +1,6 @@
 # coding: utf-8
+from __future__ import unicode_literals
+
 import re
 import time
 
@@ -10,18 +12,18 @@ from ..utils import (
 
 
 class StreamcloudIE(InfoExtractor):
-    IE_NAME = u'streamcloud.eu'
+    IE_NAME = 'streamcloud.eu'
     _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html'
 
     _TEST = {
-        u'url': u'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
-        u'file': u'skp9j99s4bpz.mp4',
-        u'md5': u'6bea4c7fa5daaacc2a946b7146286686',
-        u'info_dict': {
-            u'title': u'youtube-dl test video  \'/\\ ä ↭',
-            u'duration': 9,
+        'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
+        'md5': '6bea4c7fa5daaacc2a946b7146286686',
+        'info_dict': {
+            'id': 'skp9j99s4bpz',
+            'ext': 'mp4',
+            'title': 'youtube-dl test video  \'/\\ ä ↭',
         },
-        u'skip': u'Only available from the EU'
+        'skip': 'Only available from the EU'
     }
 
     def _real_extract(self, url):
@@ -46,21 +48,17 @@ class StreamcloudIE(InfoExtractor):
         req = compat_urllib_request.Request(url, post, headers)
 
         webpage = self._download_webpage(
-            req, video_id, note=u'Downloading video page ...')
+            req, video_id, note='Downloading video page ...')
         title = self._html_search_regex(
-            r'<h1[^>]*>([^<]+)<', webpage, u'title')
+            r'<h1[^>]*>([^<]+)<', webpage, 'title')
         video_url = self._search_regex(
-            r'file:\s*"([^"]+)"', webpage, u'video URL')
-        duration_str = self._search_regex(
-            r'duration:\s*"?([0-9]+)"?', webpage, u'duration', fatal=False)
-        duration = None if duration_str is None else int(duration_str)
+            r'file:\s*"([^"]+)"', webpage, 'video URL')
         thumbnail = self._search_regex(
-            r'image:\s*"([^"]+)"', webpage, u'thumbnail URL', fatal=False)
+            r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False)
 
         return {
             'id': video_id,
             'title': title,
             'url': video_url,
-            'duration': duration,
             'thumbnail': thumbnail,
         }
index 6c688c5202804986b6a0d6d154cb986e18789073..5d9d703673265ca4a53a54f28e34494d570cb206 100644 (file)
@@ -8,7 +8,7 @@ from ..utils import parse_duration
 
 
 class SWRMediathekIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
 
     _TESTS = [{
         'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
@@ -52,6 +52,20 @@ class SWRMediathekIE(InfoExtractor):
             'uploader': 'SWR 2',
             'uploader_id': '284670',
         }
+    }, {
+        'url': 'http://swrmediathek.de/content/player.htm?show=52dc7e00-15c5-11e4-84bc-0026b975f2e6',
+        'md5': '881531487d0633080a8cc88d31ef896f',
+        'info_dict': {
+            'id': '52dc7e00-15c5-11e4-84bc-0026b975f2e6',
+            'ext': 'mp4',
+            'title': 'Familienspaß am Bodensee',
+            'description': 'md5:0b591225a32cfde7be1629ed49fe4315',
+            'thumbnail': 're:http://.*\.jpg',
+            'duration': 1784,
+            'upload_date': '20140727',
+            'uploader': 'SWR Fernsehen BW',
+            'uploader_id': '281130',
+        }
     }]
 
     def _real_extract(self, url):
index 25b9864add9dc8422a5948111d25ea8243e10441..b870474515ba61ee33641c86554d53d68a6bf46d 100644 (file)
@@ -19,16 +19,6 @@ class TagesschauIE(InfoExtractor):
             'description': 'md5:69da3c61275b426426d711bde96463ab',
             'thumbnail': 're:^http:.*\.jpg$',
         },
-    }, {
-        'url': 'http://www.tagesschau.de/multimedia/video/video-5964.html',
-        'md5': '66652566900963a3f962333579eeffcf',
-        'info_dict': {
-            'id': '5964',
-            'ext': 'mp4',
-            'title': 'Nahost-Konflikt: Israel bombadiert Ziele im Gazastreifen und Westjordanland',
-            'description': 'md5:07bfc78c48eec3145ed4805299a1900a',
-            'thumbnail': 're:http://.*\.jpg',
-        },
     }]
 
     _FORMATS = {
index 2c2113b1404fb3631126636bdb38ba839008a404..46d727d1de6743edcb99109b77caa49ebc1bf0c6 100644 (file)
@@ -62,7 +62,7 @@ class TeacherTubeIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
-        title = self._html_search_meta('title', webpage, 'title')
+        title = self._html_search_meta('title', webpage, 'title', fatal=True)
         TITLE_SUFFIX = ' - TeacherTube'
         if title.endswith(TITLE_SUFFIX):
             title = title[:-len(TITLE_SUFFIX)].strip()
@@ -101,7 +101,11 @@ class TeacherTubeUserIE(InfoExtractor):
 
     _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'
 
-    _MEDIA_RE = r'(?s)"sidebar_thumb_time">[0-9:]+</div>.+?<a href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)">'
+    _MEDIA_RE = r'''(?sx)
+        class="?sidebar_thumb_time"?>[0-9:]+</div>
+        \s*
+        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
+    '''
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -111,14 +115,12 @@ class TeacherTubeUserIE(InfoExtractor):
         webpage = self._download_webpage(url, user_id)
         urls.extend(re.findall(self._MEDIA_RE, webpage))
         
-        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[1:-1]
+        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
         for p in pages:
             more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
-            webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages) + 1))
-            urls.extend(re.findall(self._MEDIA_RE, webpage))
-
-        entries = []
-        for url in urls:
-            entries.append(self.url_result(url, 'TeacherTube'))
+            webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages)))
+            video_urls = re.findall(self._MEDIA_RE, webpage)
+            urls.extend(video_urls)
 
+        entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls]
         return self.playlist_result(entries, user_id)
index 8477840fc65ad377f96219033fc656bb6676f52d..81ba169fbec68c9bd7fea395c8bb135d73b3e828 100644 (file)
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
new file mode 100644 (file)
index 0000000..a56a7ab
--- /dev/null
@@ -0,0 +1,85 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    parse_iso8601,
+    qualities,
+)
+
+
+class TVPlayIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?tvplay\.lv/parraides/[^/]+/(?P<id>\d+)'
+    _TESTS = [
+        {
+            'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
+            'info_dict': {
+                'id': '418113',
+                'ext': 'flv',
+                'title': 'Kādi ir īri? - Viņas melo labāk',
+                'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
+                'duration': 25,
+                'timestamp': 1406097056,
+                'upload_date': '20140723',
+            },
+            'params': {
+                # rtmp download
+                'skip_download': True,
+            },
+        },
+    ]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        video = self._download_json(
+            'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
+
+        if video['is_geo_blocked']:
+            raise ExtractorError(
+                'This content is not available in your country due to copyright reasons', expected=True)
+
+        streams = self._download_json(
+            'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON')
+
+        quality = qualities(['hls', 'medium', 'high'])
+        formats = []
+        for format_id, video_url in streams['streams'].items():
+            if not video_url:
+                continue
+            fmt = {
+                'format_id': format_id,
+                'preference': quality(format_id),
+            }
+            if video_url.startswith('rtmp'):
+                m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
+                if not m:
+                    continue
+                fmt.update({
+                    'ext': 'flv',
+                    'url': m.group('url'),
+                    'app': m.group('app'),
+                    'play_path': m.group('playpath'),
+                })
+            else:
+                fmt.update({
+                    'url': video_url,
+                })
+            formats.append(fmt)
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video['title'],
+            'description': video['description'],
+            'duration': video['duration'],
+            'timestamp': parse_iso8601(video['created_at']),
+            'view_count': video['views']['total'],
+            'age_limit': video.get('age_limit', 0),
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/ubu.py b/youtube_dl/extractor/ubu.py
new file mode 100644 (file)
index 0000000..0182d67
--- /dev/null
@@ -0,0 +1,56 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class UbuIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?ubu\.com/film/(?P<id>[\da-z_-]+)\.html'
+    _TEST = {
+        'url': 'http://ubu.com/film/her_noise.html',
+        'md5': '8edd46ee8aa6b265fb5ed6cf05c36bc9',
+        'info_dict': {
+            'id': 'her_noise',
+            'ext': 'mp4',
+            'title': 'Her Noise - The Making Of (2007)',
+            'duration': 3600,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(
+            r'<title>.+?Film &amp; Video: ([^<]+)</title>', webpage, 'title')
+
+        duration = int_or_none(self._html_search_regex(
+            r'Duration: (\d+) minutes', webpage, 'duration', fatal=False, default=None))
+        if duration:
+            duration *= 60
+
+        formats = []
+
+        FORMAT_REGEXES = [
+            ['sq', r"'flashvars'\s*,\s*'file=([^']+)'"],
+            ['hq', r'href="(http://ubumexico\.centro\.org\.mx/video/[^"]+)"']
+        ]
+
+        for format_id, format_regex in FORMAT_REGEXES:
+            m = re.search(format_regex, webpage)
+            if m:
+                formats.append({
+                    'url': m.group(1),
+                    'format_id': format_id,
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'formats': formats,
+        }
index eada13ce920b9f4e892f952242ef87bfac504600..d2ffd1b6ba893f2cb2cc50f00a3131a835dba97d 100644 (file)
@@ -177,6 +177,7 @@ class VevoIE(InfoExtractor):
             self._downloader.report_warning(
                 'Cannot download SMIL information, falling back to JSON ..')
 
+        self._sort_formats(formats)
         timestamp_ms = int(self._search_regex(
             r'/Date\((\d+)\)/', video_info['launchDate'], 'launch date'))
 
diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py
new file mode 100644 (file)
index 0000000..5c89824
--- /dev/null
@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    float_or_none,
+    str_to_int,
+)
+
+
+class VidmeIE(InfoExtractor):
+    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
+    _TEST = {
+        'url': 'https://vid.me/QNB',
+        'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+        'info_dict': {
+            'id': 'QNB',
+            'ext': 'mp4',
+            'title': 'Fishing for piranha - the easy way',
+            'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
+            'duration': 119.92,
+            'timestamp': 1406313244,
+            'upload_date': '20140725',
+            'thumbnail': 're:^https?://.*\.jpg',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(r'<source src="([^"]+)"', webpage, 'video URL')
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage, default='')
+        thumbnail = self._og_search_thumbnail(webpage)
+        timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False))
+        width = int_or_none(self._og_search_property('video:width', webpage, fatal=False))
+        height = int_or_none(self._og_search_property('video:height', webpage, fatal=False))
+        duration = float_or_none(self._html_search_regex(
+            r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
+        view_count = str_to_int(self._html_search_regex(
+            r'<span class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False))
+        like_count = str_to_int(self._html_search_regex(
+            r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
+            webpage, 'like count', fatal=False))
+        comment_count = str_to_int(self._html_search_regex(
+            r'class="js-comment-count"[^>]+data-count="([\d,\.\s]+)">',
+            webpage, 'comment count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
+            'width': width,
+            'height': height,
+            'duration': duration,
+            'view_count': view_count,
+            'like_count': like_count,
+            'comment_count': comment_count,
+        }
index 255855558cf64ddfe847db56e00e029f4bbbdf22..11c7d7e817f1f0839604311534dd918b5b5e4fee 100644 (file)
@@ -98,7 +98,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
             'info_dict': {
                 'id': '54469442',
                 'ext': 'mp4',
-                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software',
+                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                 'uploader': 'The BLN & Business of Software',
                 'uploader_id': 'theblnbusinessofsoftware',
                 'duration': 3610,
@@ -121,6 +121,21 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
                 'videopassword': 'youtube-dl',
             },
         },
+        {
+            'url': 'http://vimeo.com/channels/keypeele/75629013',
+            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
+            'note': 'Video is freely available via original URL '
+                    'and protected with password when accessed via http://vimeo.com/75629013',
+            'info_dict': {
+                'id': '75629013',
+                'ext': 'mp4',
+                'title': 'Key & Peele: Terrorist Interrogation',
+                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
+                'uploader_id': 'atencio',
+                'uploader': 'Peter Atencio',
+                'duration': 187,
+            },
+        },
         {
             'url': 'http://vimeo.com/76979871',
             'md5': '3363dd6ffebe3784d56f4132317fd446',
@@ -196,8 +211,6 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
         video_id = mobj.group('id')
         if mobj.group('pro') or mobj.group('player'):
             url = 'http://player.vimeo.com/video/' + video_id
-        else:
-            url = 'https://vimeo.com/' + video_id
 
         # Retrieve video webpage to extract further information
         request = compat_urllib_request.Request(url, None, headers)
@@ -263,7 +276,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
         if video_thumbnail is None:
             video_thumbs = config["video"].get("thumbs")
             if video_thumbs and isinstance(video_thumbs, dict):
-                _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in video_thumbs.items())[-1]
+                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
 
         # Extract video description
         video_description = None
index 68c59364bc7f48e453b85e70ecc750b4418f0a75..6d3b78749eaf1b25d23422343ab36c9d84c7cdc0 100644 (file)
@@ -10,7 +10,7 @@ from ..utils import (
 
 
 class VodlockerIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?vodlocker.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
+    _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
 
     _TESTS = [{
         'url': 'http://vodlocker.com/e8wvyzz4sl42',
index 7b77865cb172f6b46cb86561a5ae022d0263a95d..f1b9e9a19d05d9026feb24b6f22d395cd3990e5f 100644 (file)
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .common import InfoExtractor
@@ -20,12 +21,14 @@ class VubeIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Chiara Grispo - Price Tag by Jessie J',
                 'description': 'md5:8ea652a1f36818352428cb5134933313',
-                'thumbnail': 'http://frame.thestaticvube.com/snap/228x128/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f.jpg',
+                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$',
                 'uploader': 'Chiara.Grispo',
-                'uploader_id': '1u3hX0znhP',
                 'timestamp': 1388743358,
                 'upload_date': '20140103',
-                'duration': 170.56
+                'duration': 170.56,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
             }
         },
         {
@@ -36,12 +39,30 @@ class VubeIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
                 'description': 'md5:40bcacb97796339f1690642c21d56f4a',
-                'thumbnail': 'http://frame.thestaticvube.com/snap/228x128/102265d5a9f-0f17-4f6b-5753-adf08484ee1e.jpg',
+                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$',
                 'uploader': 'Seraina',
-                'uploader_id': 'XU9VE2BQ2q',
                 'timestamp': 1396492438,
                 'upload_date': '20140403',
-                'duration': 240.107
+                'duration': 240.107,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
+            }
+        }, {
+            'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
+            'md5': '0584fc13b50f887127d9d1007589d27f',
+            'info_dict': {
+                'id': '0nmsMY5vEq',
+                'ext': 'mp4',
+                'title': 'Frozen - Let It Go Cover by Siren Gene',
+                'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
+                'uploader': 'Siren Gene',
+                'uploader_id': 'Siren',
+                'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
+                'duration': 221.788,
+                'like_count': int,
+                'dislike_count': int,
+                'comment_count': int,
             }
         }
     ]
@@ -50,8 +71,16 @@ class VubeIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        video = self._download_json(
-            'http://vube.com/api/v2/video/%s' % video_id, video_id, 'Downloading video JSON')
+        webpage = self._download_webpage(url, video_id)
+        data_json = self._search_regex(
+            r'(?s)window\["(?:tapiVideoData|vubeOriginalVideoData)"\]\s*=\s*(\{.*?\n});\n',
+            webpage, 'video data'
+        )
+        data = json.loads(data_json)
+        video = (
+            data.get('video') or
+            data)
+        assert isinstance(video, dict)
 
         public_id = video['public_id']
 
@@ -69,21 +98,31 @@ class VubeIE(InfoExtractor):
 
         title = video['title']
         description = video.get('description')
-        thumbnail = video['thumbnail_src']
-        if thumbnail.startswith('//'):
-            thumbnail = 'http:' + thumbnail
-        uploader = video['user_alias']
-        uploader_id = video['user_url_id']
-        timestamp = int(video['upload_time'])
+        thumbnail = self._proto_relative_url(
+            video.get('thumbnail') or video.get('thumbnail_src'),
+            scheme='http:')
+        uploader = data.get('user', {}).get('channel', {}).get('name') or video.get('user_alias')
+        uploader_id = data.get('user', {}).get('name')
+        timestamp = int_or_none(video.get('upload_time'))
         duration = video['duration']
         view_count = video.get('raw_view_count')
-        like_count = video.get('total_likes')
-        dislike_count= video.get('total_hates')
-
-        comment = self._download_json(
-            'http://vube.com/api/video/%s/comment' % video_id, video_id, 'Downloading video comment JSON')
+        like_count = video.get('rlikes')
+        if like_count is None:
+            like_count = video.get('total_likes')
+        dislike_count = video.get('rhates')
+        if dislike_count is None:
+            dislike_count = video.get('total_hates')
 
-        comment_count = int_or_none(comment.get('total'))
+        comments = video.get('comments')
+        comment_count = None
+        if comments is None:
+            comment_data = self._download_json(
+                'http://vube.com/api/video/%s/comment' % video_id,
+                video_id, 'Downloading video comment JSON', fatal=False)
+            if comment_data is not None:
+                comment_count = int_or_none(comment_data.get('total'))
+        else:
+            comment_count = len(comments)
 
         return {
             'id': video_id,
index f741ba54007737e132f327178be283a478b6527f..54d37da618960d46d977ff4b97682ca0c7b2a99c 100644 (file)
@@ -55,14 +55,14 @@ class WDRIE(InfoExtractor):
             },
         },
         {
-            'url': 'http://www.funkhauseuropa.de/av/audiosuepersongsoulbossanova100-audioplayer.html',
-            'md5': '24e83813e832badb0a8d7d1ef9ef0691',
+            'url': 'http://www.funkhauseuropa.de/av/audioflaviacoelhoamaramar100-audioplayer.html',
+            'md5': '99a1443ff29af19f6c52cf6f4dc1f4aa',
             'info_dict': {
-                'id': 'mdb-463528',
+                'id': 'mdb-478135',
                 'ext': 'mp3',
-                'title': 'Süpersong: Soul Bossa Nova',
+                'title': 'Flavia Coelho: Amar é Amar',
                 'description': 'md5:7b29e97e10dfb6e265238b32fa35b23a',
-                'upload_date': '20140630',
+                'upload_date': '20140717',
             },
         },
     ]
@@ -81,7 +81,7 @@ class WDRIE(InfoExtractor):
             ]
             return self.playlist_result(entries, page_id)
 
-        flashvars = compat_urlparse.parse_qs(
+        flashvars = compat_parse_qs(
             self._html_search_regex(r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars'))
 
         page_id = flashvars['trackerClipId'][0]
index 6123e12564b7934032ed619b672b6277a75bace0..3c9b0b58402189338b5480d1d20f589b751e6dfe 100644 (file)
@@ -1,19 +1,17 @@
 # coding: utf-8
 
-import collections
 import errno
 import io
 import itertools
 import json
 import os.path
 import re
-import struct
 import traceback
-import zlib
 
 from .common import InfoExtractor, SearchInfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 from ..jsinterp import JSInterpreter
+from ..swfinterp import SWFInterpreter
 from ..utils import (
     compat_chr,
     compat_parse_qs,
@@ -346,14 +344,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         """Indicate the download will use the RTMP protocol."""
         self.to_screen(u'RTMP download detected')
 
-    def _extract_signature_function(self, video_id, player_url, slen):
-        id_m = re.match(r'.*-(?P<id>[a-zA-Z0-9_-]+)\.(?P<ext>[a-z]+)$',
-                        player_url)
+    def _signature_cache_id(self, example_sig):
+        """ Return a string representation of a signature """
+        return u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))
+
+    def _extract_signature_function(self, video_id, player_url, example_sig):
+        id_m = re.match(
+            r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
+            player_url)
+        if not id_m:
+            raise ExtractorError('Cannot identify player %r' % player_url)
         player_type = id_m.group('ext')
         player_id = id_m.group('id')
 
         # Read from filesystem cache
-        func_id = '%s_%s_%d' % (player_type, player_id, slen)
+        func_id = '%s_%s_%s' % (
+            player_type, player_id, self._signature_cache_id(example_sig))
         assert os.path.basename(func_id) == func_id
         cache_dir = get_cachedir(self._downloader.params)
 
@@ -387,7 +393,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         if cache_enabled:
             try:
-                test_string = u''.join(map(compat_chr, range(slen)))
+                test_string = u''.join(map(compat_chr, range(len(example_sig))))
                 cache_res = res(test_string)
                 cache_spec = [ord(c) for c in cache_res]
                 try:
@@ -403,7 +409,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         return res
 
-    def _print_sig_code(self, func, slen):
+    def _print_sig_code(self, func, example_sig):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = u'' if start == 0 else str(start)
@@ -432,11 +438,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             else:
                 yield _genslice(start, i, step)
 
-        test_string = u''.join(map(compat_chr, range(slen)))
+        test_string = u''.join(map(compat_chr, range(len(example_sig))))
         cache_res = func(test_string)
         cache_spec = [ord(c) for c in cache_res]
         expr_code = u' + '.join(gen_sig_code(cache_spec))
-        code = u'if len(s) == %d:\n    return %s\n' % (slen, expr_code)
+        signature_id_tuple = '(%s)' % (
+            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
+        code = (u'if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
+                u'    return %s\n') % (signature_id_tuple, expr_code)
         self.to_screen(u'Extracted signature function:\n' + code)
 
     def _parse_sig_js(self, jscode):
@@ -449,417 +458,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         return lambda s: initial_function([s])
 
     def _parse_sig_swf(self, file_contents):
-        if file_contents[1:3] != b'WS':
-            raise ExtractorError(
-                u'Not an SWF file; header is %r' % file_contents[:3])
-        if file_contents[:1] == b'C':
-            content = zlib.decompress(file_contents[8:])
-        else:
-            raise NotImplementedError(u'Unsupported compression format %r' %
-                                      file_contents[:1])
-
-        def extract_tags(content):
-            pos = 0
-            while pos < len(content):
-                header16 = struct.unpack('<H', content[pos:pos+2])[0]
-                pos += 2
-                tag_code = header16 >> 6
-                tag_len = header16 & 0x3f
-                if tag_len == 0x3f:
-                    tag_len = struct.unpack('<I', content[pos:pos+4])[0]
-                    pos += 4
-                assert pos+tag_len <= len(content)
-                yield (tag_code, content[pos:pos+tag_len])
-                pos += tag_len
-
-        code_tag = next(tag
-                        for tag_code, tag in extract_tags(content)
-                        if tag_code == 82)
-        p = code_tag.index(b'\0', 4) + 1
-        code_reader = io.BytesIO(code_tag[p:])
-
-        # Parse ABC (AVM2 ByteCode)
-        def read_int(reader=None):
-            if reader is None:
-                reader = code_reader
-            res = 0
-            shift = 0
-            for _ in range(5):
-                buf = reader.read(1)
-                assert len(buf) == 1
-                b = struct.unpack('<B', buf)[0]
-                res = res | ((b & 0x7f) << shift)
-                if b & 0x80 == 0:
-                    break
-                shift += 7
-            return res
-
-        def u30(reader=None):
-            res = read_int(reader)
-            assert res & 0xf0000000 == 0
-            return res
-        u32 = read_int
-
-        def s32(reader=None):
-            v = read_int(reader)
-            if v & 0x80000000 != 0:
-                v = - ((v ^ 0xffffffff) + 1)
-            return v
-
-        def read_string(reader=None):
-            if reader is None:
-                reader = code_reader
-            slen = u30(reader)
-            resb = reader.read(slen)
-            assert len(resb) == slen
-            return resb.decode('utf-8')
-
-        def read_bytes(count, reader=None):
-            if reader is None:
-                reader = code_reader
-            resb = reader.read(count)
-            assert len(resb) == count
-            return resb
-
-        def read_byte(reader=None):
-            resb = read_bytes(1, reader=reader)
-            res = struct.unpack('<B', resb)[0]
-            return res
-
-        # minor_version + major_version
-        read_bytes(2 + 2)
-
-        # Constant pool
-        int_count = u30()
-        for _c in range(1, int_count):
-            s32()
-        uint_count = u30()
-        for _c in range(1, uint_count):
-            u32()
-        double_count = u30()
-        read_bytes((double_count-1) * 8)
-        string_count = u30()
-        constant_strings = [u'']
-        for _c in range(1, string_count):
-            s = read_string()
-            constant_strings.append(s)
-        namespace_count = u30()
-        for _c in range(1, namespace_count):
-            read_bytes(1)  # kind
-            u30()  # name
-        ns_set_count = u30()
-        for _c in range(1, ns_set_count):
-            count = u30()
-            for _c2 in range(count):
-                u30()
-        multiname_count = u30()
-        MULTINAME_SIZES = {
-            0x07: 2,  # QName
-            0x0d: 2,  # QNameA
-            0x0f: 1,  # RTQName
-            0x10: 1,  # RTQNameA
-            0x11: 0,  # RTQNameL
-            0x12: 0,  # RTQNameLA
-            0x09: 2,  # Multiname
-            0x0e: 2,  # MultinameA
-            0x1b: 1,  # MultinameL
-            0x1c: 1,  # MultinameLA
-        }
-        multinames = [u'']
-        for _c in range(1, multiname_count):
-            kind = u30()
-            assert kind in MULTINAME_SIZES, u'Invalid multiname kind %r' % kind
-            if kind == 0x07:
-                u30()  # namespace_idx
-                name_idx = u30()
-                multinames.append(constant_strings[name_idx])
-            else:
-                multinames.append('[MULTINAME kind: %d]' % kind)
-                for _c2 in range(MULTINAME_SIZES[kind]):
-                    u30()
-
-        # Methods
-        method_count = u30()
-        MethodInfo = collections.namedtuple(
-            'MethodInfo',
-            ['NEED_ARGUMENTS', 'NEED_REST'])
-        method_infos = []
-        for method_id in range(method_count):
-            param_count = u30()
-            u30()  # return type
-            for _ in range(param_count):
-                u30()  # param type
-            u30()  # name index (always 0 for youtube)
-            flags = read_byte()
-            if flags & 0x08 != 0:
-                # Options present
-                option_count = u30()
-                for c in range(option_count):
-                    u30()  # val
-                    read_bytes(1)  # kind
-            if flags & 0x80 != 0:
-                # Param names present
-                for _ in range(param_count):
-                    u30()  # param name
-            mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0)
-            method_infos.append(mi)
-
-        # Metadata
-        metadata_count = u30()
-        for _c in range(metadata_count):
-            u30()  # name
-            item_count = u30()
-            for _c2 in range(item_count):
-                u30()  # key
-                u30()  # value
-
-        def parse_traits_info():
-            trait_name_idx = u30()
-            kind_full = read_byte()
-            kind = kind_full & 0x0f
-            attrs = kind_full >> 4
-            methods = {}
-            if kind in [0x00, 0x06]:  # Slot or Const
-                u30()  # Slot id
-                u30()  # type_name_idx
-                vindex = u30()
-                if vindex != 0:
-                    read_byte()  # vkind
-            elif kind in [0x01, 0x02, 0x03]:  # Method / Getter / Setter
-                u30()  # disp_id
-                method_idx = u30()
-                methods[multinames[trait_name_idx]] = method_idx
-            elif kind == 0x04:  # Class
-                u30()  # slot_id
-                u30()  # classi
-            elif kind == 0x05:  # Function
-                u30()  # slot_id
-                function_idx = u30()
-                methods[function_idx] = multinames[trait_name_idx]
-            else:
-                raise ExtractorError(u'Unsupported trait kind %d' % kind)
-
-            if attrs & 0x4 != 0:  # Metadata present
-                metadata_count = u30()
-                for _c3 in range(metadata_count):
-                    u30()  # metadata index
-
-            return methods
-
-        # Classes
+        swfi = SWFInterpreter(file_contents)
         TARGET_CLASSNAME = u'SignatureDecipher'
-        searched_idx = multinames.index(TARGET_CLASSNAME)
-        searched_class_id = None
-        class_count = u30()
-        for class_id in range(class_count):
-            name_idx = u30()
-            if name_idx == searched_idx:
-                # We found the class we're looking for!
-                searched_class_id = class_id
-            u30()  # super_name idx
-            flags = read_byte()
-            if flags & 0x08 != 0:  # Protected namespace is present
-                u30()  # protected_ns_idx
-            intrf_count = u30()
-            for _c2 in range(intrf_count):
-                u30()
-            u30()  # iinit
-            trait_count = u30()
-            for _c2 in range(trait_count):
-                parse_traits_info()
-
-        if searched_class_id is None:
-            raise ExtractorError(u'Target class %r not found' %
-                                 TARGET_CLASSNAME)
-
-        method_names = {}
-        method_idxs = {}
-        for class_id in range(class_count):
-            u30()  # cinit
-            trait_count = u30()
-            for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
-                if class_id == searched_class_id:
-                    method_names.update(trait_methods.items())
-                    method_idxs.update(dict(
-                        (idx, name)
-                        for name, idx in trait_methods.items()))
-
-        # Scripts
-        script_count = u30()
-        for _c in range(script_count):
-            u30()  # init
-            trait_count = u30()
-            for _c2 in range(trait_count):
-                parse_traits_info()
-
-        # Method bodies
-        method_body_count = u30()
-        Method = collections.namedtuple('Method', ['code', 'local_count'])
-        methods = {}
-        for _c in range(method_body_count):
-            method_idx = u30()
-            u30()  # max_stack
-            local_count = u30()
-            u30()  # init_scope_depth
-            u30()  # max_scope_depth
-            code_length = u30()
-            code = read_bytes(code_length)
-            if method_idx in method_idxs:
-                m = Method(code, local_count)
-                methods[method_idxs[method_idx]] = m
-            exception_count = u30()
-            for _c2 in range(exception_count):
-                u30()  # from
-                u30()  # to
-                u30()  # target
-                u30()  # exc_type
-                u30()  # var_name
-            trait_count = u30()
-            for _c2 in range(trait_count):
-                parse_traits_info()
-
-        assert p + code_reader.tell() == len(code_tag)
-        assert len(methods) == len(method_idxs)
-
-        method_pyfunctions = {}
-
-        def extract_function(func_name):
-            if func_name in method_pyfunctions:
-                return method_pyfunctions[func_name]
-            if func_name not in methods:
-                raise ExtractorError(u'Cannot find function %r' % func_name)
-            m = methods[func_name]
-
-            def resfunc(args):
-                registers = ['(this)'] + list(args) + [None] * m.local_count
-                stack = []
-                coder = io.BytesIO(m.code)
-                while True:
-                    opcode = struct.unpack('!B', coder.read(1))[0]
-                    if opcode == 36:  # pushbyte
-                        v = struct.unpack('!B', coder.read(1))[0]
-                        stack.append(v)
-                    elif opcode == 44:  # pushstring
-                        idx = u30(coder)
-                        stack.append(constant_strings[idx])
-                    elif opcode == 48:  # pushscope
-                        # We don't implement the scope register, so we'll just
-                        # ignore the popped value
-                        stack.pop()
-                    elif opcode == 70:  # callproperty
-                        index = u30(coder)
-                        mname = multinames[index]
-                        arg_count = u30(coder)
-                        args = list(reversed(
-                            [stack.pop() for _ in range(arg_count)]))
-                        obj = stack.pop()
-                        if mname == u'split':
-                            assert len(args) == 1
-                            assert isinstance(args[0], compat_str)
-                            assert isinstance(obj, compat_str)
-                            if args[0] == u'':
-                                res = list(obj)
-                            else:
-                                res = obj.split(args[0])
-                            stack.append(res)
-                        elif mname == u'slice':
-                            assert len(args) == 1
-                            assert isinstance(args[0], int)
-                            assert isinstance(obj, list)
-                            res = obj[args[0]:]
-                            stack.append(res)
-                        elif mname == u'join':
-                            assert len(args) == 1
-                            assert isinstance(args[0], compat_str)
-                            assert isinstance(obj, list)
-                            res = args[0].join(obj)
-                            stack.append(res)
-                        elif mname in method_pyfunctions:
-                            stack.append(method_pyfunctions[mname](args))
-                        else:
-                            raise NotImplementedError(
-                                u'Unsupported property %r on %r'
-                                % (mname, obj))
-                    elif opcode == 72:  # returnvalue
-                        res = stack.pop()
-                        return res
-                    elif opcode == 79:  # callpropvoid
-                        index = u30(coder)
-                        mname = multinames[index]
-                        arg_count = u30(coder)
-                        args = list(reversed(
-                            [stack.pop() for _ in range(arg_count)]))
-                        obj = stack.pop()
-                        if mname == u'reverse':
-                            assert isinstance(obj, list)
-                            obj.reverse()
-                        else:
-                            raise NotImplementedError(
-                                u'Unsupported (void) property %r on %r'
-                                % (mname, obj))
-                    elif opcode == 93:  # findpropstrict
-                        index = u30(coder)
-                        mname = multinames[index]
-                        res = extract_function(mname)
-                        stack.append(res)
-                    elif opcode == 97:  # setproperty
-                        index = u30(coder)
-                        value = stack.pop()
-                        idx = stack.pop()
-                        obj = stack.pop()
-                        assert isinstance(obj, list)
-                        assert isinstance(idx, int)
-                        obj[idx] = value
-                    elif opcode == 98:  # getlocal
-                        index = u30(coder)
-                        stack.append(registers[index])
-                    elif opcode == 99:  # setlocal
-                        index = u30(coder)
-                        value = stack.pop()
-                        registers[index] = value
-                    elif opcode == 102:  # getproperty
-                        index = u30(coder)
-                        pname = multinames[index]
-                        if pname == u'length':
-                            obj = stack.pop()
-                            assert isinstance(obj, list)
-                            stack.append(len(obj))
-                        else:  # Assume attribute access
-                            idx = stack.pop()
-                            assert isinstance(idx, int)
-                            obj = stack.pop()
-                            assert isinstance(obj, list)
-                            stack.append(obj[idx])
-                    elif opcode == 128:  # coerce
-                        u30(coder)
-                    elif opcode == 133:  # coerce_s
-                        assert isinstance(stack[-1], (type(None), compat_str))
-                    elif opcode == 164:  # modulo
-                        value2 = stack.pop()
-                        value1 = stack.pop()
-                        res = value1 % value2
-                        stack.append(res)
-                    elif opcode == 208:  # getlocal_0
-                        stack.append(registers[0])
-                    elif opcode == 209:  # getlocal_1
-                        stack.append(registers[1])
-                    elif opcode == 210:  # getlocal_2
-                        stack.append(registers[2])
-                    elif opcode == 211:  # getlocal_3
-                        stack.append(registers[3])
-                    elif opcode == 214:  # setlocal_2
-                        registers[2] = stack.pop()
-                    elif opcode == 215:  # setlocal_3
-                        registers[3] = stack.pop()
-                    else:
-                        raise NotImplementedError(
-                            u'Unsupported opcode %d' % opcode)
-
-            method_pyfunctions[func_name] = resfunc
-            return resfunc
-
-        initial_function = extract_function(u'decipher')
+        searched_class = swfi.extract_class(TARGET_CLASSNAME)
+        initial_function = swfi.extract_function(searched_class, u'decipher')
         return lambda s: initial_function([s])
 
     def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
@@ -871,20 +473,20 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         if player_url.startswith(u'//'):
             player_url = u'https:' + player_url
         try:
-            player_id = (player_url, len(s))
+            player_id = (player_url, self._signature_cache_id(s))
             if player_id not in self._player_cache:
                 func = self._extract_signature_function(
-                    video_id, player_url, len(s)
+                    video_id, player_url, s
                 )
                 self._player_cache[player_id] = func
             func = self._player_cache[player_id]
             if self._downloader.params.get('youtube_print_sig_code'):
-                self._print_sig_code(func, len(s))
+                self._print_sig_code(func, s)
             return func(s)
         except Exception as e:
             tb = traceback.format_exc()
             raise ExtractorError(
-                u'Automatic signature extraction failed: ' + tb, cause=e)
+                u'Signature extraction failed: ' + tb, cause=e)
 
     def _get_available_subtitles(self, video_id, webpage):
         try:
@@ -1014,14 +616,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             age_gate = True
             # We simulate the access to the video from www.youtube.com/v/{video_id}
             # this can be viewed without login into Youtube
-            data = compat_urllib_parse.urlencode({'video_id': video_id,
-                                                  'el': 'player_embedded',
-                                                  'gl': 'US',
-                                                  'hl': 'en',
-                                                  'eurl': 'https://youtube.googleapis.com/v/' + video_id,
-                                                  'asv': 3,
-                                                  'sts':'1588',
-                                                  })
+            data = compat_urllib_parse.urlencode({
+                'video_id': video_id,
+                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
+                'sts': self._search_regex(
+                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
+            })
             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
             video_info_webpage = self._download_webpage(video_info_url, video_id,
                                     note=False,
@@ -1214,43 +814,54 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             url_map = {}
             for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
-                if 'itag' in url_data and 'url' in url_data:
-                    url = url_data['url'][0]
-                    if 'sig' in url_data:
-                        url += '&signature=' + url_data['sig'][0]
-                    elif 's' in url_data:
-                        encrypted_sig = url_data['s'][0]
-                        if self._downloader.params.get('verbose'):
-                            if age_gate:
-                                if player_url is None:
-                                    player_version = 'unknown'
-                                else:
-                                    player_version = self._search_regex(
-                                        r'-(.+)\.swf$', player_url,
-                                        u'flash player', fatal=False)
+                if 'itag' not in url_data or 'url' not in url_data:
+                    continue
+                format_id = url_data['itag'][0]
+                url = url_data['url'][0]
+
+                if 'sig' in url_data:
+                    url += '&signature=' + url_data['sig'][0]
+                elif 's' in url_data:
+                    encrypted_sig = url_data['s'][0]
+
+                    if not age_gate:
+                        jsplayer_url_json = self._search_regex(
+                            r'"assets":.+?"js":\s*("[^"]+")',
+                            video_webpage, u'JS player URL')
+                        player_url = json.loads(jsplayer_url_json)
+                    if player_url is None:
+                        player_url_json = self._search_regex(
+                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
+                            video_webpage, u'age gate player URL')
+                        player_url = json.loads(player_url_json)
+
+                    if self._downloader.params.get('verbose'):
+                        if player_url is None:
+                            player_version = 'unknown'
+                            player_desc = 'unknown'
+                        else:
+                            if player_url.endswith('swf'):
+                                player_version = self._search_regex(
+                                    r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
+                                    u'flash player', fatal=False)
                                 player_desc = 'flash player %s' % player_version
                             else:
                                 player_version = self._search_regex(
-                                    r'html5player-(.+?)\.js', video_webpage,
+                                    r'html5player-([^/]+?)(?:/html5player)?\.js',
+                                    player_url,
                                     'html5 player', fatal=False)
                                 player_desc = u'html5 player %s' % player_version
 
-                            parts_sizes = u'.'.join(compat_str(len(part)) for part in encrypted_sig.split('.'))
-                            self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
-                                (len(encrypted_sig), parts_sizes, url_data['itag'][0], player_desc))
-
-                        if not age_gate:
-                            jsplayer_url_json = self._search_regex(
-                                r'"assets":.+?"js":\s*("[^"]+")',
-                                video_webpage, u'JS player URL')
-                            player_url = json.loads(jsplayer_url_json)
-
-                        signature = self._decrypt_signature(
-                            encrypted_sig, video_id, player_url, age_gate)
-                        url += '&signature=' + signature
-                    if 'ratebypass' not in url:
-                        url += '&ratebypass=yes'
-                    url_map[url_data['itag'][0]] = url
+                        parts_sizes = self._signature_cache_id(encrypted_sig)
+                        self.to_screen(u'{%s} signature length %s, %s' %
+                            (format_id, parts_sizes, player_desc))
+
+                    signature = self._decrypt_signature(
+                        encrypted_sig, video_id, player_url, age_gate)
+                    url += '&signature=' + signature
+                if 'ratebypass' not in url:
+                    url += '&ratebypass=yes'
+                url_map[format_id] = url
             formats = _map_to_format_list(url_map)
         elif video_info.get('hlsvp'):
             manifest_url = video_info['hlsvp'][0]
index ae5bca2e643f1ec4d719e8e895b0a655a98a151a..c40cd376d120f2063bb4cf6958ca4cf701db1f00 100644 (file)
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .utils import (
@@ -40,8 +41,9 @@ class JSInterpreter(object):
             assign = lambda v: v
             expr = stmt[len('return '):]
         else:
-            raise ExtractorError(
-                'Cannot determine left side of statement in %r' % stmt)
+            # Try interpreting it as an expression
+            expr = stmt
+            assign = lambda v: v
 
         v = self.interpret_expression(expr, local_vars, allow_recursion)
         return assign(v)
@@ -53,35 +55,63 @@ class JSInterpreter(object):
         if expr.isalpha():
             return local_vars[expr]
 
-        m = re.match(r'^(?P<in>[a-z]+)\.(?P<member>.*)$', expr)
+        try:
+            return json.loads(expr)
+        except ValueError:
+            pass
+
+        m = re.match(
+            r'^(?P<var>[a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
+            expr)
         if m:
+            variable = m.group('var')
             member = m.group('member')
-            variable = m.group('in')
+            arg_str = m.group('args')
 
-            if variable not in local_vars:
+            if variable in local_vars:
+                obj = local_vars[variable]
+            else:
                 if variable not in self._objects:
                     self._objects[variable] = self.extract_object(variable)
                 obj = self._objects[variable]
-                key, args = member.split('(', 1)
-                args = args.strip(')')
-                argvals = [int(v) if v.isdigit() else local_vars[v]
-                           for v in args.split(',')]
-                return obj[key](argvals)
-
-            val = local_vars[variable]
-            if member == 'split("")':
-                return list(val)
-            if member == 'join("")':
-                return ''.join(val)
-            if member == 'length':
-                return len(val)
-            if member == 'reverse()':
-                return val[::-1]
-            slice_m = re.match(r'slice\((?P<idx>.*)\)', member)
-            if slice_m:
-                idx = self.interpret_expression(
-                    slice_m.group('idx'), local_vars, allow_recursion - 1)
-                return val[idx:]
+
+            if arg_str is None:
+                # Member access
+                if member == 'length':
+                    return len(obj)
+                return obj[member]
+
+            assert expr.endswith(')')
+            # Function call
+            if arg_str == '':
+                argvals = tuple()
+            else:
+                argvals = tuple([
+                    self.interpret_expression(v, local_vars, allow_recursion)
+                    for v in arg_str.split(',')])
+
+            if member == 'split':
+                assert argvals == ('',)
+                return list(obj)
+            if member == 'join':
+                assert len(argvals) == 1
+                return argvals[0].join(obj)
+            if member == 'reverse':
+                assert len(argvals) == 0
+                obj.reverse()
+                return obj
+            if member == 'slice':
+                assert len(argvals) == 1
+                return obj[argvals[0]:]
+            if member == 'splice':
+                assert isinstance(obj, list)
+                index, howMany = argvals
+                res = []
+                for i in range(index, min(index + howMany, len(obj))):
+                    res.append(obj.pop(index))
+                return res
+
+            return obj[member](argvals)
 
         m = re.match(
             r'^(?P<in>[a-z]+)\[(?P<idx>.+)\]$', expr)
@@ -103,10 +133,11 @@ class JSInterpreter(object):
             r'^(?P<func>[a-zA-Z$]+)\((?P<args>[a-z0-9,]+)\)$', expr)
         if m:
             fname = m.group('func')
+            argvals = tuple([
+                int(v) if v.isdigit() else local_vars[v]
+                for v in m.group('args').split(',')])
             if fname not in self._functions:
                 self._functions[fname] = self.extract_function(fname)
-            argvals = [int(v) if v.isdigit() else local_vars[v]
-                       for v in m.group('args').split(',')]
             return self._functions[fname](argvals)
         raise ExtractorError('Unsupported JS expression %r' % expr)
 
@@ -114,13 +145,13 @@ class JSInterpreter(object):
         obj = {}
         obj_m = re.search(
             (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
-            r'\s*(?P<fields>([a-zA-Z$]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
+            r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
             r'\}\s*;',
             self.code)
         fields = obj_m.group('fields')
         # Currently, it only supports function definitions
         fields_m = re.finditer(
-            r'(?P<key>[a-zA-Z$]+)\s*:\s*function'
+            r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
             r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
             fields)
         for f in fields_m:
index 45328ed43ef44052fedb9dcf5f2de012aa3007ae..8c5f7c43b75b17466a91a2b54c928ec8ca2298f3 100644 (file)
@@ -18,14 +18,15 @@ from ..utils import (
 )
 
 
-
class FFmpegPostProcessorError(PostProcessingError):
    """Raised when an ffmpeg/avconv invocation fails.

    The message carries the last line of ffmpeg's stderr output.
    """
    pass
 
+
 class FFmpegPostProcessor(PostProcessor):
-    def __init__(self,downloader=None):
+    def __init__(self, downloader=None, deletetempfiles=False):
         PostProcessor.__init__(self, downloader)
         self._exes = self.detect_executables()
+        self._deletetempfiles = deletetempfiles
 
     @staticmethod
     def detect_executables():
@@ -60,6 +61,9 @@ class FFmpegPostProcessor(PostProcessor):
             stderr = stderr.decode('utf-8', 'replace')
             msg = stderr.strip().split('\n')[-1]
             raise FFmpegPostProcessorError(msg)
+        if self._deletetempfiles:
+            for ipath in input_paths:
+                os.remove(ipath)
 
     def run_ffmpeg(self, path, out_path, opts):
         self.run_ffmpeg_multiple_files([path], out_path, opts)
diff --git a/youtube_dl/swfinterp.py b/youtube_dl/swfinterp.py
new file mode 100644 (file)
index 0000000..b63c65b
--- /dev/null
@@ -0,0 +1,609 @@
+from __future__ import unicode_literals
+
+import collections
+import io
+import zlib
+
+from .utils import (
+    compat_str,
+    ExtractorError,
+    struct_unpack,
+)
+
+
def _extract_tags(file_contents):
    """Yield (tag_code, tag_body) pairs from a zlib-compressed SWF file.

    Raises ExtractorError when the signature is not ?WS, and
    NotImplementedError when the first byte is not 'C' (i.e. the file
    is not zlib-compressed).
    """
    if file_contents[1:3] != b'WS':
        raise ExtractorError(
            'Not an SWF file; header is %r' % file_contents[:3])
    if file_contents[:1] == b'C':
        content = zlib.decompress(file_contents[8:])
    else:
        raise NotImplementedError(
            'Unsupported compression format %r' %
            file_contents[:1])

    # Determine number of bits in framesize rectangle
    framesize_nbits = struct_unpack('!B', content[:1])[0] >> 3
    framesize_len = (5 + 4 * framesize_nbits + 7) // 8

    # Skip frame size rectangle, frame rate (2 bytes) and frame count (2 bytes)
    pos = framesize_len + 2 + 2
    while pos < len(content):
        # Tag header: upper 10 bits are the code, lower 6 the length;
        # a length of 0x3f signals a 32-bit long-form length field
        header16 = struct_unpack('<H', content[pos:pos + 2])[0]
        pos += 2
        tag_code = header16 >> 6
        tag_len = header16 & 0x3f
        if tag_len == 0x3f:
            tag_len = struct_unpack('<I', content[pos:pos + 4])[0]
            pos += 4
        assert pos + tag_len <= len(content), \
            ('Tag %d ends at %d+%d - that\'s longer than the file (%d)'
                % (tag_code, pos, tag_len, len(content)))
        yield (tag_code, content[pos:pos + tag_len])
        pos += tag_len
+
+
+class _AVMClass_Object(object):
+    def __init__(self, avm_class):
+        self.avm_class = avm_class
+
+    def __repr__(self):
+        return '%s#%x' % (self.avm_class.name, id(self))
+
+
+class _ScopeDict(dict):
+    def __init__(self, avm_class):
+        super(_ScopeDict, self).__init__()
+        self.avm_class = avm_class
+
+    def __repr__(self):
+        return '%s__Scope(%s)' % (
+            self.avm_class.name,
+            super(_ScopeDict, self).__repr__())
+
+
class _AVMClass(object):
    """Static description of one AVM2 class: name, method tables, variables."""

    def __init__(self, name_idx, name):
        self.name_idx = name_idx
        self.name = name
        self.method_names = {}         # method name -> method index
        self.method_idxs = {}          # method index -> method name
        self.methods = {}              # method name -> parsed Method record
        self.method_pyfunctions = {}   # method name -> compiled Python callable
        self.variables = _ScopeDict(self)

    def make_object(self):
        """Create an instance of this class (no constructor code is run)."""
        return _AVMClass_Object(self)

    def __repr__(self):
        return '_AVMClass(%s)' % (self.name)

    def register_methods(self, methods):
        """Record a name->index mapping and keep the inverse index in sync."""
        self.method_names.update(methods.items())
        for method_name, method_idx in methods.items():
            self.method_idxs[method_idx] = method_name
+
+
+class _Multiname(object):
+    def __init__(self, kind):
+        self.kind = kind
+
+    def __repr__(self):
+        return '[MULTINAME kind: 0x%x]' % self.kind
+
+
def _read_int(reader):
    """Read a variable-length unsigned integer (7 bits per byte, LEB128-style).

    At most 5 bytes are consumed; the high bit of each byte marks
    continuation.
    """
    result = 0
    for shift in range(0, 35, 7):
        buf = reader.read(1)
        assert len(buf) == 1
        b = struct_unpack('<B', buf)[0]
        result |= (b & 0x7f) << shift
        if not (b & 0x80):
            break
    return result
+
+
def _u30(reader):
    """Read a u30: a variable-length unsigned int asserted to fit in 30 bits."""
    res = _read_int(reader)
    assert res & 0xf0000000 == 0
    return res
# u32 shares the same wire encoding, just without the range assertion
_u32 = _read_int
+
+
def _s32(reader):
    """Read a variable-length int and reinterpret it as signed 32-bit two's complement."""
    value = _read_int(reader)
    if value & 0x80000000 != 0:
        # Negative: invert the low 32 bits and add one, then negate
        value = - ((value ^ 0xffffffff) + 1)
    return value
+
+
def _s24(reader):
    """Read a signed 24-bit little-endian integer, sign-extended to 32 bits."""
    bs = reader.read(3)
    assert len(bs) == 3
    # Pad with 0xff when the top bit of the most significant byte is set
    last_byte = b'\xff' if (ord(bs[2:3]) >= 0x80) else b'\x00'
    return struct_unpack('<i', bs + last_byte)[0]
+
+
def _read_string(reader):
    """Read a u30 length-prefixed UTF-8 string."""
    length = _u30(reader)
    data = reader.read(length)
    assert len(data) == length
    return data.decode('utf-8')
+
+
+def _read_bytes(count, reader):
+    assert count >= 0
+    resb = reader.read(count)
+    assert len(resb) == count
+    return resb
+
+
def _read_byte(reader):
    """Read a single byte and return it as an int."""
    resb = _read_bytes(1, reader=reader)
    res = struct_unpack('<B', resb)[0]
    return res
+
+
class SWFInterpreter(object):
    """Parser and interpreter for the ABC (AVM2 bytecode) block of an SWF file.

    On construction, the DoABC tag (tag code 82) is located and its
    constant pool, method, class, script and method-body sections are
    parsed.  extract_function() then compiles a named class method into
    a Python callable that interprets the supported subset of AVM2
    opcodes.
    """

    def __init__(self, file_contents):
        """Parse the ABC data of the first DoABC tag in file_contents."""
        code_tag = next(tag
                        for tag_code, tag in _extract_tags(file_contents)
                        if tag_code == 82)
        # The DoABC body starts with flags (4 bytes) and a NUL-terminated name
        p = code_tag.index(b'\0', 4) + 1
        code_reader = io.BytesIO(code_tag[p:])

        # Parse ABC (AVM2 ByteCode)

        # Define a couple convenience methods
        u30 = lambda *args: _u30(*args, reader=code_reader)
        s32 = lambda *args: _s32(*args, reader=code_reader)
        u32 = lambda *args: _u32(*args, reader=code_reader)
        read_bytes = lambda *args: _read_bytes(*args, reader=code_reader)
        read_byte = lambda *args: _read_byte(*args, reader=code_reader)

        # minor_version + major_version
        read_bytes(2 + 2)

        # Constant pool
        # NOTE: pool counts are 1-based; index 0 is an implicit default entry
        int_count = u30()
        for _c in range(1, int_count):
            s32()
        uint_count = u30()
        for _c in range(1, uint_count):
            u32()
        double_count = u30()
        read_bytes(max(0, (double_count - 1)) * 8)
        string_count = u30()
        self.constant_strings = ['']
        for _c in range(1, string_count):
            s = _read_string(code_reader)
            self.constant_strings.append(s)
        namespace_count = u30()
        for _c in range(1, namespace_count):
            read_bytes(1)  # kind
            u30()  # name
        ns_set_count = u30()
        for _c in range(1, ns_set_count):
            count = u30()
            for _c2 in range(count):
                u30()
        multiname_count = u30()
        # Number of u30 fields that follow each multiname kind byte
        MULTINAME_SIZES = {
            0x07: 2,  # QName
            0x0d: 2,  # QNameA
            0x0f: 1,  # RTQName
            0x10: 1,  # RTQNameA
            0x11: 0,  # RTQNameL
            0x12: 0,  # RTQNameLA
            0x09: 2,  # Multiname
            0x0e: 2,  # MultinameA
            0x1b: 1,  # MultinameL
            0x1c: 1,  # MultinameLA
        }
        self.multinames = ['']
        for _c in range(1, multiname_count):
            kind = u30()
            assert kind in MULTINAME_SIZES, 'Invalid multiname kind %r' % kind
            if kind == 0x07:
                # Only plain QNames resolve to a string here; other kinds
                # are kept as opaque _Multiname placeholders
                u30()  # namespace_idx
                name_idx = u30()
                self.multinames.append(self.constant_strings[name_idx])
            else:
                self.multinames.append(_Multiname(kind))
                for _c2 in range(MULTINAME_SIZES[kind]):
                    u30()

        # Methods
        method_count = u30()
        MethodInfo = collections.namedtuple(
            'MethodInfo',
            ['NEED_ARGUMENTS', 'NEED_REST'])
        method_infos = []
        for method_id in range(method_count):
            param_count = u30()
            u30()  # return type
            for _ in range(param_count):
                u30()  # param type
            u30()  # name index (always 0 for youtube)
            flags = read_byte()
            if flags & 0x08 != 0:
                # Options present
                option_count = u30()
                for c in range(option_count):
                    u30()  # val
                    read_bytes(1)  # kind
            if flags & 0x80 != 0:
                # Param names present
                for _ in range(param_count):
                    u30()  # param name
            mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0)
            method_infos.append(mi)

        # Metadata
        metadata_count = u30()
        for _c in range(metadata_count):
            u30()  # name
            item_count = u30()
            for _c2 in range(item_count):
                u30()  # key
                u30()  # value

        def parse_traits_info():
            # Parse one trait entry; returns {trait_name: method_idx} for
            # method-like traits, an empty dict otherwise
            trait_name_idx = u30()
            kind_full = read_byte()
            kind = kind_full & 0x0f
            attrs = kind_full >> 4
            methods = {}
            if kind in [0x00, 0x06]:  # Slot or Const
                u30()  # Slot id
                u30()  # type_name_idx
                vindex = u30()
                if vindex != 0:
                    read_byte()  # vkind
            elif kind in [0x01, 0x02, 0x03]:  # Method / Getter / Setter
                u30()  # disp_id
                method_idx = u30()
                methods[self.multinames[trait_name_idx]] = method_idx
            elif kind == 0x04:  # Class
                u30()  # slot_id
                u30()  # classi
            elif kind == 0x05:  # Function
                u30()  # slot_id
                function_idx = u30()
                # NOTE(review): key/value order is reversed compared to the
                # Method branch above — confirm this is intentional
                methods[function_idx] = self.multinames[trait_name_idx]
            else:
                raise ExtractorError('Unsupported trait kind %d' % kind)

            if attrs & 0x4 != 0:  # Metadata present
                metadata_count = u30()
                for _c3 in range(metadata_count):
                    u30()  # metadata index

            return methods

        # Classes
        class_count = u30()
        classes = []
        for class_id in range(class_count):
            name_idx = u30()

            cname = self.multinames[name_idx]
            avm_class = _AVMClass(name_idx, cname)
            classes.append(avm_class)

            u30()  # super_name idx
            flags = read_byte()
            if flags & 0x08 != 0:  # Protected namespace is present
                u30()  # protected_ns_idx
            intrf_count = u30()
            for _c2 in range(intrf_count):
                u30()
            u30()  # iinit
            trait_count = u30()
            for _c2 in range(trait_count):
                trait_methods = parse_traits_info()
                avm_class.register_methods(trait_methods)

        assert len(classes) == class_count
        self._classes_by_name = dict((c.name, c) for c in classes)

        # Static (class_info) traits follow the instance traits, in order
        for avm_class in classes:
            u30()  # cinit
            trait_count = u30()
            for _c2 in range(trait_count):
                trait_methods = parse_traits_info()
                avm_class.register_methods(trait_methods)

        # Scripts
        script_count = u30()
        for _c in range(script_count):
            u30()  # init
            trait_count = u30()
            for _c2 in range(trait_count):
                parse_traits_info()

        # Method bodies
        method_body_count = u30()
        Method = collections.namedtuple('Method', ['code', 'local_count'])
        for _c in range(method_body_count):
            method_idx = u30()
            u30()  # max_stack
            local_count = u30()
            u30()  # init_scope_depth
            u30()  # max_scope_depth
            code_length = u30()
            code = read_bytes(code_length)
            for avm_class in classes:
                if method_idx in avm_class.method_idxs:
                    m = Method(code, local_count)
                    avm_class.methods[avm_class.method_idxs[method_idx]] = m
            exception_count = u30()
            for _c2 in range(exception_count):
                u30()  # from
                u30()  # to
                u30()  # target
                u30()  # exc_type
                u30()  # var_name
            trait_count = u30()
            for _c2 in range(trait_count):
                parse_traits_info()

        # The whole DoABC body must have been consumed
        assert p + code_reader.tell() == len(code_tag)

    def extract_class(self, class_name):
        """Return the parsed _AVMClass for class_name; raise ExtractorError if absent."""
        try:
            return self._classes_by_name[class_name]
        except KeyError:
            raise ExtractorError('Class %r not found' % class_name)

    def extract_function(self, avm_class, func_name):
        """Compile avm_class.func_name into a Python callable and cache it.

        If func_name names another parsed class instead of a method, a
        fresh instance of that class is returned.  Raises ExtractorError
        when the name resolves to neither.
        """
        if func_name in avm_class.method_pyfunctions:
            return avm_class.method_pyfunctions[func_name]
        if func_name in self._classes_by_name:
            return self._classes_by_name[func_name].make_object()
        if func_name not in avm_class.methods:
            raise ExtractorError('Cannot find function %s.%s' % (
                avm_class.name, func_name))
        m = avm_class.methods[func_name]

        def resfunc(args):
            """Interpret the method's AVM2 bytecode with the given argument list."""
            # Helper functions
            coder = io.BytesIO(m.code)
            s24 = lambda: _s24(coder)
            u30 = lambda: _u30(coder)

            # register 0 is the receiver scope, then the arguments, then locals
            registers = [avm_class.variables] + list(args) + [None] * m.local_count
            stack = []
            scopes = collections.deque([
                self._classes_by_name, avm_class.variables])
            while True:
                opcode = _read_byte(coder)
                if opcode == 17:  # iftrue
                    offset = s24()
                    value = stack.pop()
                    if value:
                        coder.seek(coder.tell() + offset)
                elif opcode == 18:  # iffalse
                    offset = s24()
                    value = stack.pop()
                    if not value:
                        coder.seek(coder.tell() + offset)
                elif opcode == 36:  # pushbyte
                    v = _read_byte(coder)
                    stack.append(v)
                elif opcode == 42:  # dup
                    value = stack[-1]
                    stack.append(value)
                elif opcode == 44:  # pushstring
                    idx = u30()
                    stack.append(self.constant_strings[idx])
                elif opcode == 48:  # pushscope
                    new_scope = stack.pop()
                    scopes.append(new_scope)
                elif opcode == 66:  # construct
                    # NOTE(review): rebinding 'args' here shadows resfunc's
                    # argument list for the rest of the loop
                    arg_count = u30()
                    args = list(reversed(
                        [stack.pop() for _ in range(arg_count)]))
                    obj = stack.pop()
                    res = obj.avm_class.make_object()
                    stack.append(res)
                elif opcode == 70:  # callproperty
                    index = u30()
                    mname = self.multinames[index]
                    arg_count = u30()
                    args = list(reversed(
                        [stack.pop() for _ in range(arg_count)]))
                    obj = stack.pop()

                    if isinstance(obj, _AVMClass_Object):
                        func = self.extract_function(obj.avm_class, mname)
                        res = func(args)
                        stack.append(res)
                        continue
                    elif isinstance(obj, _ScopeDict):
                        if mname in obj.avm_class.method_names:
                            func = self.extract_function(obj.avm_class, mname)
                            res = func(args)
                        else:
                            res = obj[mname]
                        stack.append(res)
                        continue
                    elif isinstance(obj, compat_str):
                        if mname == 'split':
                            assert len(args) == 1
                            assert isinstance(args[0], compat_str)
                            if args[0] == '':
                                res = list(obj)
                            else:
                                res = obj.split(args[0])
                            stack.append(res)
                            continue
                    elif isinstance(obj, list):
                        if mname == 'slice':
                            assert len(args) == 1
                            assert isinstance(args[0], int)
                            res = obj[args[0]:]
                            stack.append(res)
                            continue
                        elif mname == 'join':
                            assert len(args) == 1
                            assert isinstance(args[0], compat_str)
                            res = args[0].join(obj)
                            stack.append(res)
                            continue
                    raise NotImplementedError(
                        'Unsupported property %r on %r'
                        % (mname, obj))
                elif opcode == 72:  # returnvalue
                    res = stack.pop()
                    return res
                elif opcode == 74:  # constructproperty
                    index = u30()
                    arg_count = u30()
                    args = list(reversed(
                        [stack.pop() for _ in range(arg_count)]))
                    obj = stack.pop()

                    mname = self.multinames[index]
                    assert isinstance(obj, _AVMClass)

                    # We do not actually call the constructor for now;
                    # we just pretend it does nothing
                    stack.append(obj.make_object())
                elif opcode == 79:  # callpropvoid
                    index = u30()
                    mname = self.multinames[index]
                    arg_count = u30()
                    args = list(reversed(
                        [stack.pop() for _ in range(arg_count)]))
                    obj = stack.pop()
                    if mname == 'reverse':
                        assert isinstance(obj, list)
                        obj.reverse()
                    else:
                        raise NotImplementedError(
                            'Unsupported (void) property %r on %r'
                            % (mname, obj))
                elif opcode == 86:  # newarray
                    arg_count = u30()
                    arr = []
                    for i in range(arg_count):
                        arr.append(stack.pop())
                    arr = arr[::-1]
                    stack.append(arr)
                elif opcode == 93:  # findpropstrict
                    index = u30()
                    mname = self.multinames[index]
                    for s in reversed(scopes):
                        if mname in s:
                            res = s
                            break
                    else:
                        res = scopes[0]
                    stack.append(res[mname])
                elif opcode == 94:  # findproperty
                    index = u30()
                    mname = self.multinames[index]
                    for s in reversed(scopes):
                        if mname in s:
                            res = s
                            break
                    else:
                        res = avm_class.variables
                    stack.append(res)
                elif opcode == 96:  # getlex
                    index = u30()
                    mname = self.multinames[index]
                    for s in reversed(scopes):
                        if mname in s:
                            scope = s
                            break
                    else:
                        scope = avm_class.variables
                    # I cannot find where static variables are initialized
                    # so let's just return None
                    res = scope.get(mname)
                    stack.append(res)
                elif opcode == 97:  # setproperty
                    index = u30()
                    value = stack.pop()
                    idx = self.multinames[index]
                    if isinstance(idx, _Multiname):
                        idx = stack.pop()
                    obj = stack.pop()
                    obj[idx] = value
                elif opcode == 98:  # getlocal
                    index = u30()
                    stack.append(registers[index])
                elif opcode == 99:  # setlocal
                    index = u30()
                    value = stack.pop()
                    registers[index] = value
                elif opcode == 102:  # getproperty
                    index = u30()
                    pname = self.multinames[index]
                    if pname == 'length':
                        obj = stack.pop()
                        assert isinstance(obj, list)
                        stack.append(len(obj))
                    else:  # Assume attribute access
                        idx = stack.pop()
                        assert isinstance(idx, int)
                        obj = stack.pop()
                        assert isinstance(obj, list)
                        stack.append(obj[idx])
                elif opcode == 115:  # convert_i (coerce top of stack to int)
                    value = stack.pop()
                    intvalue = int(value)
                    stack.append(intvalue)
                elif opcode == 128:  # coerce
                    u30()
                elif opcode == 133:  # coerce_s
                    assert isinstance(stack[-1], (type(None), compat_str))
                elif opcode == 160:  # add
                    value2 = stack.pop()
                    value1 = stack.pop()
                    res = value1 + value2
                    stack.append(res)
                elif opcode == 161:  # subtract
                    value2 = stack.pop()
                    value1 = stack.pop()
                    res = value1 - value2
                    stack.append(res)
                elif opcode == 164:  # modulo
                    value2 = stack.pop()
                    value1 = stack.pop()
                    res = value1 % value2
                    stack.append(res)
                elif opcode == 175:  # greaterequals
                    value2 = stack.pop()
                    value1 = stack.pop()
                    result = value1 >= value2
                    stack.append(result)
                elif opcode == 208:  # getlocal_0
                    stack.append(registers[0])
                elif opcode == 209:  # getlocal_1
                    stack.append(registers[1])
                elif opcode == 210:  # getlocal_2
                    stack.append(registers[2])
                elif opcode == 211:  # getlocal_3
                    stack.append(registers[3])
                elif opcode == 212:  # setlocal_0
                    registers[0] = stack.pop()
                elif opcode == 213:  # setlocal_1
                    registers[1] = stack.pop()
                elif opcode == 214:  # setlocal_2
                    registers[2] = stack.pop()
                elif opcode == 215:  # setlocal_3
                    registers[3] = stack.pop()
                else:
                    raise NotImplementedError(
                        'Unsupported opcode %d' % opcode)

        avm_class.method_pyfunctions[func_name] = resfunc
        return resfunc
+
index 64a9618ca62493f893af16b31b3fbd331bbdc1e7..e40b367c255719046bf2d5dd2fd63bc6bb2e4d8a 100644 (file)
@@ -91,11 +91,9 @@ except ImportError:
     compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
 
 try:
-    from urllib.parse import parse_qs as compat_parse_qs
-except ImportError: # Python 2
-    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
-    # Python 2's version is apparently totally broken
-    def _unquote(string, encoding='utf-8', errors='replace'):
+    from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
         if string == '':
             return string
         res = string.split('%')
@@ -130,6 +128,13 @@ except ImportError: # Python 2
             string += pct_sequence.decode(encoding, errors)
         return string
 
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError: # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+
     def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                 encoding='utf-8', errors='replace'):
         qs, _coerce_result = qs, unicode
@@ -149,10 +154,12 @@ except ImportError: # Python 2
                     continue
             if len(nv[1]) or keep_blank_values:
                 name = nv[0].replace('+', ' ')
-                name = _unquote(name, encoding=encoding, errors=errors)
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
                 name = _coerce_result(name)
                 value = nv[1].replace('+', ' ')
-                value = _unquote(value, encoding=encoding, errors=errors)
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
                 value = _coerce_result(value)
                 r.append((name, value))
         return r
@@ -235,8 +242,8 @@ else:
 if sys.version_info >= (2,7):
     def find_xpath_attr(node, xpath, key, val):
         """ Find the xpath xpath[@key=val] """
-        assert re.match(r'^[a-zA-Z]+$', key)
-        assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val)
+        assert re.match(r'^[a-zA-Z-]+$', key)
+        assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
         expr = xpath + u"[@%s='%s']" % (key, val)
         return node.find(expr)
 else:
@@ -845,6 +852,8 @@ def unified_strdate(date_str):
     return upload_date
 
def determine_ext(url, default_ext=u'unknown_video'):
    """Guess a file extension from a URL.

    Strips any query string, takes the text after the last dot and
    returns it if it is purely alphanumeric; otherwise (or when url is
    None) returns default_ext.
    """
    if url is None:
        return default_ext
    candidate = url.partition(u'?')[0].rpartition(u'.')[2]
    if re.match(r'^[A-Za-z0-9]+$', candidate):
        return candidate
    return default_ext
@@ -1193,11 +1202,6 @@ def format_bytes(bytes):
     return u'%.2f%s' % (converted, suffix)
 
 
-def str_to_int(int_str):
-    int_str = re.sub(r'[,\.]', u'', int_str)
-    return int(int_str)
-
-
 def get_term_width():
     columns = os.environ.get('COLUMNS', None)
     if columns:
@@ -1265,15 +1269,22 @@ class HEADRequest(compat_urllib_request.Request):
         return "HEAD"
 
 
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce v to an int, returning default when v is None.

    If get_attr is given, the named attribute of v is converted instead.
    The result is multiplied by invscale and floor-divided by scale.
    """
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if v is None:
        return default
    return int(v) * invscale // scale
+
+
def str_to_int(int_str):
    """Parse an integer from a string, ignoring ',' and '.' thousands
    separators; None passes through unchanged."""
    if int_str is None:
        return None
    return int(re.sub(r'[,\.]', u'', int_str))
 
 
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce v to a float, returning default when v is None.

    The result is multiplied by invscale and divided by scale.
    """
    if v is None:
        return default
    return float(v) * invscale / scale
 
 
 def parse_duration(s):
index 4d606c3d2333ffbcdbfb64d55f4e8a2a8db3bf75..08b5339f6e377294deca4372862a4b288b4d9023 100644 (file)
@@ -1,2 +1,2 @@
 
-__version__ = '2014.07.15'
+__version__ = '2014.08.05'