Raphaël G. Git Repositories - youtubedl/commitdiff
Imported Upstream version 2014.01.17.2
author: Rogério Brito <rbrito@ime.usp.br>
Sun, 19 Jan 2014 04:38:23 +0000 (02:38 -0200)
committer: Rogério Brito <rbrito@ime.usp.br>
Sun, 19 Jan 2014 04:38:23 +0000 (02:38 -0200)
101 files changed:
README.md
README.txt
devscripts/bash-completion.in
devscripts/check-porn.py
devscripts/gh-pages/update-feed.py
devscripts/make_readme.py
devscripts/release.sh
setup.py
test/test_YoutubeDL.py
test/test_all_urls.py
test/test_download.py
test/test_playlists.py
test/test_subtitles.py
test/test_unicode_literals.py [new file with mode: 0644]
test/test_utils.py
test/test_youtube_signature.py
youtube-dl
youtube-dl.1
youtube-dl.bash-completion
youtube_dl/FileDownloader.py
youtube_dl/YoutubeDL.py
youtube_dl/__init__.py
youtube_dl/downloader/__init__.py [new file with mode: 0644]
youtube_dl/downloader/common.py [new file with mode: 0644]
youtube_dl/downloader/hls.py [new file with mode: 0644]
youtube_dl/downloader/http.py [new file with mode: 0644]
youtube_dl/downloader/mplayer.py [new file with mode: 0644]
youtube_dl/downloader/rtmp.py [new file with mode: 0644]
youtube_dl/extractor/__init__.py
youtube_dl/extractor/academicearth.py
youtube_dl/extractor/appletrailers.py
youtube_dl/extractor/archiveorg.py
youtube_dl/extractor/arte.py
youtube_dl/extractor/auengine.py
youtube_dl/extractor/bambuser.py
youtube_dl/extractor/bandcamp.py
youtube_dl/extractor/blinkx.py
youtube_dl/extractor/bliptv.py
youtube_dl/extractor/bloomberg.py
youtube_dl/extractor/brightcove.py
youtube_dl/extractor/c56.py
youtube_dl/extractor/channel9.py
youtube_dl/extractor/cmt.py [new file with mode: 0644]
youtube_dl/extractor/cnn.py
youtube_dl/extractor/collegehumor.py
youtube_dl/extractor/comedycentral.py
youtube_dl/extractor/common.py
youtube_dl/extractor/condenast.py
youtube_dl/extractor/cspan.py
youtube_dl/extractor/defense.py
youtube_dl/extractor/dreisat.py
youtube_dl/extractor/everyonesmixtape.py [new file with mode: 0644]
youtube_dl/extractor/flickr.py
youtube_dl/extractor/franceinter.py [new file with mode: 0644]
youtube_dl/extractor/francetv.py
youtube_dl/extractor/gamespot.py
youtube_dl/extractor/generic.py
youtube_dl/extractor/imdb.py
youtube_dl/extractor/ina.py
youtube_dl/extractor/internetvideoarchive.py
youtube_dl/extractor/ivi.py
youtube_dl/extractor/jpopsukitv.py [new file with mode: 0644]
youtube_dl/extractor/kankan.py
youtube_dl/extractor/khanacademy.py [new file with mode: 0644]
youtube_dl/extractor/lynda.py [new file with mode: 0644]
youtube_dl/extractor/macgamestore.py [new file with mode: 0644]
youtube_dl/extractor/mdr.py
youtube_dl/extractor/metacritic.py
youtube_dl/extractor/mit.py
youtube_dl/extractor/mixcloud.py
youtube_dl/extractor/mpora.py [new file with mode: 0644]
youtube_dl/extractor/mtv.py
youtube_dl/extractor/myvideo.py
youtube_dl/extractor/novamov.py [new file with mode: 0644]
youtube_dl/extractor/nowvideo.py
youtube_dl/extractor/orf.py
youtube_dl/extractor/pornhd.py
youtube_dl/extractor/pornhub.py
youtube_dl/extractor/redtube.py
youtube_dl/extractor/rtlnow.py
youtube_dl/extractor/smotri.py
youtube_dl/extractor/soundcloud.py
youtube_dl/extractor/spankwire.py
youtube_dl/extractor/spiegel.py
youtube_dl/extractor/teamcoco.py
youtube_dl/extractor/ted.py
youtube_dl/extractor/theplatform.py
youtube_dl/extractor/veehd.py
youtube_dl/extractor/veoh.py
youtube_dl/extractor/vimeo.py
youtube_dl/extractor/wistia.py
youtube_dl/extractor/yahoo.py
youtube_dl/extractor/youporn.py
youtube_dl/extractor/youtube.py
youtube_dl/extractor/zdf.py
youtube_dl/postprocessor/__init__.py [new file with mode: 0644]
youtube_dl/postprocessor/common.py [new file with mode: 0644]
youtube_dl/postprocessor/ffmpeg.py [moved from youtube_dl/PostProcessor.py with 80% similarity]
youtube_dl/postprocessor/xattrpp.py [new file with mode: 0644]
youtube_dl/utils.py
youtube_dl/version.py

index caed9484672d8844890d43d98708f83044e6b3d2..cf0bb7b654cba513c598697186abb3e4507dd3dd 100644 (file)
--- a/README.md
+++ b/README.md
@@ -34,12 +34,16 @@ which means you can modify it, redistribute it or use it however you like.
                                empty string (--proxy "") for direct connection
     --no-check-certificate     Suppress HTTPS certificate validation.
     --cache-dir DIR            Location in the filesystem where youtube-dl can
                                empty string (--proxy "") for direct connection
     --no-check-certificate     Suppress HTTPS certificate validation.
     --cache-dir DIR            Location in the filesystem where youtube-dl can
-                               store downloaded information permanently. By
+                               store some downloaded information permanently. By
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
-                               /youtube-dl .
+                               /youtube-dl . At the moment, only YouTube player
+                               files (for videos with obfuscated signatures) are
+                               cached, but that may change.
     --no-cache-dir             Disable filesystem caching
     --no-cache-dir             Disable filesystem caching
+    --socket-timeout None      Time to wait before giving up, in seconds
     --bidi-workaround          Work around terminals that lack bidirectional
     --bidi-workaround          Work around terminals that lack bidirectional
-                               text support. Requires fribidi executable in PATH
+                               text support. Requires bidiv or fribidi
+                               executable in PATH
 
 ## Video Selection:
     --playlist-start NUMBER    playlist video to start at (default is 1)
 
 ## Video Selection:
     --playlist-start NUMBER    playlist video to start at (default is 1)
@@ -54,8 +58,10 @@ which means you can modify it, redistribute it or use it however you like.
     --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
                                50k or 44.6m)
     --date DATE                download only videos uploaded in this date
     --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
                                50k or 44.6m)
     --date DATE                download only videos uploaded in this date
-    --datebefore DATE          download only videos uploaded before this date
-    --dateafter DATE           download only videos uploaded after this date
+    --datebefore DATE          download only videos uploaded on or before this
+                               date (i.e. inclusive)
+    --dateafter DATE           download only videos uploaded on or after this
+                               date (i.e. inclusive)
     --min-views COUNT          Do not download any videos with less than COUNT
                                views
     --max-views COUNT          Do not download any videos with more than COUNT
     --min-views COUNT          Do not download any videos with less than COUNT
                                views
     --max-views COUNT          Do not download any videos with more than COUNT
@@ -87,13 +93,13 @@ which means you can modify it, redistribute it or use it however you like.
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
                                extension, %(format)s for the format description
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
                                extension, %(format)s for the format description
-                               (like "22 - 1280x720" or "HD"),%(format_id)s for
+                               (like "22 - 1280x720" or "HD"), %(format_id)s for
                                the unique id of the format (like Youtube's
                                the unique id of the format (like Youtube's
-                               itags: "137"),%(upload_date)s for the upload date
-                               (YYYYMMDD), %(extractor)s for the provider
-                               (youtube, metacafe, etc), %(id)s for the video id
-                               , %(playlist)s for the playlist the video is in,
-                               %(playlist_index)s for the position in the
+                               itags: "137"), %(upload_date)s for the upload
+                               date (YYYYMMDD), %(extractor)s for the provider
+                               (youtube, metacafe, etc), %(id)s for the video
+                               id, %(playlist)s for the playlist the video is
+                               in, %(playlist_index)s for the position in the
                                playlist and %% for a literal percent. Use - to
                                output to stdout. Can also be used to download to
                                a different directory, for example with -o '/my/d
                                playlist and %% for a literal percent. Use - to
                                output to stdout. Can also be used to download to
                                a different directory, for example with -o '/my/d
@@ -105,7 +111,7 @@ which means you can modify it, redistribute it or use it however you like.
                                avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
     --load-info FILE           json file containing the video information
                                avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
     --load-info FILE           json file containing the video information
-                               (created with the "--write-json" option
+                               (created with the "--write-json" option)
     -w, --no-overwrites        do not overwrite files
     -c, --continue             force resume of partially downloaded files. By
                                default, youtube-dl will resume downloads if
     -w, --no-overwrites        do not overwrite files
     -c, --continue             force resume of partially downloaded files. By
                                default, youtube-dl will resume downloads if
@@ -139,7 +145,7 @@ which means you can modify it, redistribute it or use it however you like.
     --no-progress              do not print progress bar
     --console-title            display progress in console titlebar
     -v, --verbose              print various debugging information
     --no-progress              do not print progress bar
     --console-title            display progress in console titlebar
     -v, --verbose              print various debugging information
-    --dump-intermediate-pages  print downloaded pages to debug problems(very
+    --dump-intermediate-pages  print downloaded pages to debug problems (very
                                verbose)
     --write-pages              Write downloaded intermediary pages to files in
                                the current directory to debug problems
                                verbose)
     --write-pages              Write downloaded intermediary pages to files in
                                the current directory to debug problems
@@ -152,8 +158,7 @@ which means you can modify it, redistribute it or use it however you like.
     --prefer-free-formats      prefer free video formats unless a specific one
                                is requested
     --max-quality FORMAT       highest quality format to download
     --prefer-free-formats      prefer free video formats unless a specific one
                                is requested
     --max-quality FORMAT       highest quality format to download
-    -F, --list-formats         list all available formats (currently youtube
-                               only)
+    -F, --list-formats         list all available formats
 
 ## Subtitle Options:
     --write-sub                write subtitle file
 
 ## Subtitle Options:
     --write-sub                write subtitle file
@@ -171,7 +176,7 @@ which means you can modify it, redistribute it or use it however you like.
     -u, --username USERNAME    account username
     -p, --password PASSWORD    account password
     -n, --netrc                use .netrc authentication data
     -u, --username USERNAME    account username
     -p, --password PASSWORD    account password
     -n, --netrc                use .netrc authentication data
-    --video-password PASSWORD  video password (vimeo only)
+    --video-password PASSWORD  video password (vimeo, smotri)
 
 ## Post-processing Options:
     -x, --extract-audio        convert video files to audio-only files (requires
 
 ## Post-processing Options:
     -x, --extract-audio        convert video files to audio-only files (requires
@@ -189,7 +194,13 @@ which means you can modify it, redistribute it or use it however you like.
                                processed files are overwritten by default
     --embed-subs               embed subtitles in the video (only for mp4
                                videos)
                                processed files are overwritten by default
     --embed-subs               embed subtitles in the video (only for mp4
                                videos)
-    --add-metadata             add metadata to the files
+    --add-metadata             write metadata to the video file
+    --xattrs                   write metadata to the video file's xattrs (using
+                               dublin core and xdg standards)
+    --prefer-avconv            Prefer avconv over ffmpeg for running the
+                               postprocessors (default)
+    --prefer-ffmpeg            Prefer ffmpeg over avconv for running the
+                               postprocessors
 
 # CONFIGURATION
 
 
 # CONFIGURATION
 
@@ -228,9 +239,12 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb
  
 Examples:
 
  
 Examples:
 
-       $ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
-       $ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970
-       $ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
+  $ # Download only the videos uploaded in the last 6 months
+       $ youtube-dl --dateafter now-6months
+  $ # Download only the videos uploaded on January 1, 1970
+       $ youtube-dl --date 19700101
+  $ # will only download the videos uploaded in the 200x decade
+       $ youtube-dl --dateafter 20000101 --datebefore 20091231
 
 # FAQ
 
 
 # FAQ
 
@@ -309,7 +323,7 @@ Site support requests must contain an example URL. An example URL is a URL you m
 
 ###  Are you using the latest version?
 
 
 ###  Are you using the latest version?
 
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. Ábout 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
 
 ###  Is the issue already documented?
 
 
 ###  Is the issue already documented?
 
@@ -334,3 +348,7 @@ In particular, every site support request issue should only pertain to services
 ###  Is anyone going to need the feature?
 
 Only post features that you (or an incapicated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
 ###  Is anyone going to need the feature?
 
 Only post features that you (or an incapicated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
+
+###  Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
index 3645b8c6ce293feb0a921805377a50ef9e384e08..69cab28b5900b8300034139d0d191a42633c1c05 100644 (file)
@@ -41,12 +41,16 @@ OPTIONS
                                empty string (--proxy "") for direct connection
     --no-check-certificate     Suppress HTTPS certificate validation.
     --cache-dir DIR            Location in the filesystem where youtube-dl can
                                empty string (--proxy "") for direct connection
     --no-check-certificate     Suppress HTTPS certificate validation.
     --cache-dir DIR            Location in the filesystem where youtube-dl can
-                               store downloaded information permanently. By
+                               store some downloaded information permanently. By
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
                                default $XDG_CACHE_HOME/youtube-dl or ~/.cache
-                               /youtube-dl .
+                               /youtube-dl . At the moment, only YouTube player
+                               files (for videos with obfuscated signatures) are
+                               cached, but that may change.
     --no-cache-dir             Disable filesystem caching
     --no-cache-dir             Disable filesystem caching
+    --socket-timeout None      Time to wait before giving up, in seconds
     --bidi-workaround          Work around terminals that lack bidirectional
     --bidi-workaround          Work around terminals that lack bidirectional
-                               text support. Requires fribidi executable in PATH
+                               text support. Requires bidiv or fribidi
+                               executable in PATH
 
 Video Selection:
 ----------------
 
 Video Selection:
 ----------------
@@ -63,8 +67,10 @@ Video Selection:
     --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
                                50k or 44.6m)
     --date DATE                download only videos uploaded in this date
     --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
                                50k or 44.6m)
     --date DATE                download only videos uploaded in this date
-    --datebefore DATE          download only videos uploaded before this date
-    --dateafter DATE           download only videos uploaded after this date
+    --datebefore DATE          download only videos uploaded on or before this
+                               date (i.e. inclusive)
+    --dateafter DATE           download only videos uploaded on or after this
+                               date (i.e. inclusive)
     --min-views COUNT          Do not download any videos with less than COUNT
                                views
     --max-views COUNT          Do not download any videos with more than COUNT
     --min-views COUNT          Do not download any videos with less than COUNT
                                views
     --max-views COUNT          Do not download any videos with more than COUNT
@@ -100,13 +106,13 @@ Filesystem Options:
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
                                extension, %(format)s for the format description
                                different, %(autonumber)s to get an automatically
                                incremented number, %(ext)s for the filename
                                extension, %(format)s for the format description
-                               (like "22 - 1280x720" or "HD"),%(format_id)s for
+                               (like "22 - 1280x720" or "HD"), %(format_id)s for
                                the unique id of the format (like Youtube's
                                the unique id of the format (like Youtube's
-                               itags: "137"),%(upload_date)s for the upload date
-                               (YYYYMMDD), %(extractor)s for the provider
-                               (youtube, metacafe, etc), %(id)s for the video id
-                               , %(playlist)s for the playlist the video is in,
-                               %(playlist_index)s for the position in the
+                               itags: "137"), %(upload_date)s for the upload
+                               date (YYYYMMDD), %(extractor)s for the provider
+                               (youtube, metacafe, etc), %(id)s for the video
+                               id, %(playlist)s for the playlist the video is
+                               in, %(playlist_index)s for the position in the
                                playlist and %% for a literal percent. Use - to
                                output to stdout. Can also be used to download to
                                a different directory, for example with -o '/my/d
                                playlist and %% for a literal percent. Use - to
                                output to stdout. Can also be used to download to
                                a different directory, for example with -o '/my/d
@@ -118,7 +124,7 @@ Filesystem Options:
                                avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
     --load-info FILE           json file containing the video information
                                avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
     --load-info FILE           json file containing the video information
-                               (created with the "--write-json" option
+                               (created with the "--write-json" option)
     -w, --no-overwrites        do not overwrite files
     -c, --continue             force resume of partially downloaded files. By
                                default, youtube-dl will resume downloads if
     -w, --no-overwrites        do not overwrite files
     -c, --continue             force resume of partially downloaded files. By
                                default, youtube-dl will resume downloads if
@@ -154,7 +160,7 @@ Verbosity / Simulation Options:
     --no-progress              do not print progress bar
     --console-title            display progress in console titlebar
     -v, --verbose              print various debugging information
     --no-progress              do not print progress bar
     --console-title            display progress in console titlebar
     -v, --verbose              print various debugging information
-    --dump-intermediate-pages  print downloaded pages to debug problems(very
+    --dump-intermediate-pages  print downloaded pages to debug problems (very
                                verbose)
     --write-pages              Write downloaded intermediary pages to files in
                                the current directory to debug problems
                                verbose)
     --write-pages              Write downloaded intermediary pages to files in
                                the current directory to debug problems
@@ -169,8 +175,7 @@ Video Format Options:
     --prefer-free-formats      prefer free video formats unless a specific one
                                is requested
     --max-quality FORMAT       highest quality format to download
     --prefer-free-formats      prefer free video formats unless a specific one
                                is requested
     --max-quality FORMAT       highest quality format to download
-    -F, --list-formats         list all available formats (currently youtube
-                               only)
+    -F, --list-formats         list all available formats
 
 Subtitle Options:
 -----------------
 
 Subtitle Options:
 -----------------
@@ -192,7 +197,7 @@ Authentication Options:
     -u, --username USERNAME    account username
     -p, --password PASSWORD    account password
     -n, --netrc                use .netrc authentication data
     -u, --username USERNAME    account username
     -p, --password PASSWORD    account password
     -n, --netrc                use .netrc authentication data
-    --video-password PASSWORD  video password (vimeo only)
+    --video-password PASSWORD  video password (vimeo, smotri)
 
 Post-processing Options:
 ------------------------
 
 Post-processing Options:
 ------------------------
@@ -212,7 +217,13 @@ Post-processing Options:
                                processed files are overwritten by default
     --embed-subs               embed subtitles in the video (only for mp4
                                videos)
                                processed files are overwritten by default
     --embed-subs               embed subtitles in the video (only for mp4
                                videos)
-    --add-metadata             add metadata to the files
+    --add-metadata             write metadata to the video file
+    --xattrs                   write metadata to the video file's xattrs (using
+                               dublin core and xdg standards)
+    --prefer-avconv            Prefer avconv over ffmpeg for running the
+                               postprocessors (default)
+    --prefer-ffmpeg            Prefer ffmpeg over avconv for running the
+                               postprocessors
 
 CONFIGURATION
 =============
 
 CONFIGURATION
 =============
@@ -277,9 +288,11 @@ Videos can be filtered by their upload date using the options --date,
 
 Examples:
 
 
 Examples:
 
-    $ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
-    $ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970
-    $ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
+$ # Download only the videos uploaded in the last 6 months $ youtube-dl
+--dateafter now-6months $ # Download only the videos uploaded on January
+1, 1970 $ youtube-dl --date 19700101 $ # will only download the videos
+uploaded in the 200x decade $ youtube-dl --dateafter 20000101
+--datebefore 20091231
 
 FAQ
 ===
 
 FAQ
 ===
@@ -413,7 +426,7 @@ a video service (e.g. http://www.youtube.com/ ) is not an example URL.
 Are you using the latest version?
 
 Before reporting any issue, type youtube-dl -U. This should report that
 Are you using the latest version?
 
 Before reporting any issue, type youtube-dl -U. This should report that
-you're up-to-date. Ábout 20% of the reports we receive are already
+you're up-to-date. About 20% of the reports we receive are already
 fixed, but people are using outdated versions. This goes for feature
 requests as well.
 
 fixed, but people are using outdated versions. This goes for feature
 requests as well.
 
@@ -478,3 +491,13 @@ Only post features that you (or an incapicated friend you can personally
 talk to) require. Do not post features because they seem like a good
 idea. If they are really useful, they will be requested by someone who
 requires them.
 talk to) require. Do not post features because they seem like a good
 idea. If they are really useful, they will be requested by someone who
 requires them.
+
+Is your question about youtube-dl?
+
+It may sound strange, but some bug reports we receive are completely
+unrelated to youtube-dl and relate to a different or even the reporter's
+own application. Please make sure that you are actually using
+youtube-dl. If you are using a UI for youtube-dl, report the bug to the
+maintainer of the actual application providing the UI. On the other
+hand, if your UI for youtube-dl fails in some way you believe is related
+to youtube-dl, by all means, go ahead and report the bug.
index 3af87a3783a83cddf364e5f39b2bffd10b480950..28bd237278da5c0ade9ed96a4cb722c1b6973cf3 100644 (file)
@@ -6,7 +6,7 @@ __youtube_dl()
     prev="${COMP_WORDS[COMP_CWORD-1]}"
     opts="{{flags}}"
     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
     prev="${COMP_WORDS[COMP_CWORD-1]}"
     opts="{{flags}}"
     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
-    fileopts="-a|--batch-file|--download-archive|--cookies"
+    fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
     diropts="--cache-dir"
 
     if [[ ${prev} =~ ${fileopts} ]]; then
     diropts="--cache-dir"
 
     if [[ ${prev} =~ ${fileopts} ]]; then
index 63401fe18a12d3600ee741888656d28b8e52e9df..86aa37b5fb687acc91f29753f1bf8ba0db7b8e94 100644 (file)
@@ -3,6 +3,9 @@
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
 if we are not 'age_limit' tagging some porn site
 """
 This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check
 if we are not 'age_limit' tagging some porn site
+
+A second approach implemented relies on a list of porn domains, to activate it
+pass the list filename as the only argument
 """
 
 # Allow direct execution
 """
 
 # Allow direct execution
@@ -11,25 +14,42 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import get_testcases
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import get_testcases
+from youtube_dl.utils import compat_urllib_parse_urlparse
 from youtube_dl.utils import compat_urllib_request
 
 from youtube_dl.utils import compat_urllib_request
 
+if len(sys.argv) > 1:
+    METHOD = 'LIST'
+    LIST = open(sys.argv[1]).read().decode('utf8').strip()
+else:
+    METHOD = 'EURISTIC'
+
 for test in get_testcases():
 for test in get_testcases():
-    try:
-        webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
-    except:
-        print('\nFail: {0}'.format(test['name']))
-        continue
+    if METHOD == 'EURISTIC':
+        try:
+            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
+        except:
+            print('\nFail: {0}'.format(test['name']))
+            continue
+
+        webpage = webpage.decode('utf8', 'replace')
+
+        RESULT = 'porn' in webpage.lower()
+
+    elif METHOD == 'LIST':
+        domain = compat_urllib_parse_urlparse(test['url']).netloc
+        if not domain:
+            print('\nFail: {0}'.format(test['name']))
+            continue
+        domain = '.'.join(domain.split('.')[-2:])
 
 
-    webpage = webpage.decode('utf8', 'replace')
+        RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)
 
 
-    if 'porn' in webpage.lower() and ('info_dict' not in test
-                                      or 'age_limit' not in test['info_dict']
-                                      or test['info_dict']['age_limit'] != 18):
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
+                   or test['info_dict']['age_limit'] != 18):
         print('\nPotential missing age_limit check: {0}'.format(test['name']))
 
         print('\nPotential missing age_limit check: {0}'.format(test['name']))
 
-    elif 'porn' not in webpage.lower() and ('info_dict' in test and
-                                            'age_limit' in test['info_dict'] and
-                                            test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
+                         and test['info_dict']['age_limit'] == 18):
         print('\nPotential false negative: {0}'.format(test['name']))
 
     else:
         print('\nPotential false negative: {0}'.format(test['name']))
 
     else:
index 16571a924c132b8ba7849ea9ad81a4d63c6ed208..0ba15ae0f7c83a4eb2ac6b2b56aeb72d55b3a951 100755 (executable)
@@ -1,56 +1,76 @@
 #!/usr/bin/env python3
 
 import datetime
 #!/usr/bin/env python3
 
 import datetime
-
+import io
+import json
 import textwrap
 
 import textwrap
 
-import json
 
 
-atom_template=textwrap.dedent("""\
-                                                               <?xml version='1.0' encoding='utf-8'?>
-                                                               <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
-                                                                       <atom:title>youtube-dl releases</atom:title>
-                                                                       <atom:id>youtube-dl-updates-feed</atom:id>
-                                                                       <atom:updated>@TIMESTAMP@</atom:updated>
-                                                                       @ENTRIES@
-                                                               </atom:feed>""")
-
-entry_template=textwrap.dedent("""
-                                                               <atom:entry>
-                                                                       <atom:id>youtube-dl-@VERSION@</atom:id>
-                                                                       <atom:title>New version @VERSION@</atom:title>
-                                                                       <atom:link href="http://rg3.github.io/youtube-dl" />
-                                                                       <atom:content type="xhtml">
-                                                                               <div xmlns="http://www.w3.org/1999/xhtml">
-                                                                                       Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
-                                                                               </div>
-                                                                       </atom:content>
-                                                                       <atom:author>
-                                                                               <atom:name>The youtube-dl maintainers</atom:name>
-                                                                       </atom:author>
-                                                                       <atom:updated>@TIMESTAMP@</atom:updated>
-                                                               </atom:entry>
-                                                               """)
+atom_template = textwrap.dedent("""\
+    <?xml version="1.0" encoding="utf-8"?>
+    <feed xmlns="http://www.w3.org/2005/Atom">
+        <link rel="self" href="http://rg3.github.io/youtube-dl/update/releases.atom" />
+        <title>youtube-dl releases</title>
+        <id>https://yt-dl.org/feed/youtube-dl-updates-feed</id>
+        <updated>@TIMESTAMP@</updated>
+        @ENTRIES@
+    </feed>""")
 
 
-now = datetime.datetime.now()
-now_iso = now.isoformat()
+entry_template = textwrap.dedent("""
+    <entry>
+        <id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id>
+        <title>New version @VERSION@</title>
+        <link href="http://rg3.github.io/youtube-dl" />
+        <content type="xhtml">
+            <div xmlns="http://www.w3.org/1999/xhtml">
+                Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
+            </div>
+        </content>
+        <author>
+            <name>The youtube-dl maintainers</name>
+        </author>
+        <updated>@TIMESTAMP@</updated>
+    </entry>
+    """)
 
 
-atom_template = atom_template.replace('@TIMESTAMP@',now_iso)
+now = datetime.datetime.now()
+now_iso = now.isoformat() + 'Z'
 
 
-entries=[]
+atom_template = atom_template.replace('@TIMESTAMP@', now_iso)
 
 versions_info = json.load(open('update/versions.json'))
 versions = list(versions_info['versions'].keys())
 versions.sort()
 
 
 versions_info = json.load(open('update/versions.json'))
 versions = list(versions_info['versions'].keys())
 versions.sort()
 
+entries = []
 for v in versions:
 for v in versions:
-       entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
-       entry = entry.replace('@VERSION@',v)
-       entries.append(entry)
+    fields = v.split('.')
+    year, month, day = map(int, fields[:3])
+    faked = 0
+    patchlevel = 0
+    while True:
+        try:
+            datetime.date(year, month, day)
+        except ValueError:
+            day -= 1
+            faked += 1
+            assert day > 0
+            continue
+        break
+    if len(fields) >= 4:
+        try:
+            patchlevel = int(fields[3])
+        except ValueError:
+            patchlevel = 1
+    timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel)
+
+    entry = entry_template.replace('@TIMESTAMP@', timestamp)
+    entry = entry.replace('@VERSION@', v)
+    entries.append(entry)
 
 entries_str = textwrap.indent(''.join(entries), '\t')
 atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 
 entries_str = textwrap.indent(''.join(entries), '\t')
 atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
-with open('update/releases.atom','w',encoding='utf-8') as atom_file:
-       atom_file.write(atom_template)
+with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
+    atom_file.write(atom_template)
 
 
index 7f2ea319cca5c7c3433665c534758e93fedf8f89..cae1fa4f24fe0f190760e942666a379c47d041d7 100755 (executable)
@@ -1,20 +1,24 @@
+import io
 import sys
 import re
 
 README_FILE = 'README.md'
 helptext = sys.stdin.read()
 
 import sys
 import re
 
 README_FILE = 'README.md'
 helptext = sys.stdin.read()
 
-with open(README_FILE) as f:
+if isinstance(helptext, bytes):
+    helptext = helptext.decode('utf-8')
+
+with io.open(README_FILE, encoding='utf-8') as f:
     oldreadme = f.read()
 
 header = oldreadme[:oldreadme.index('# OPTIONS')]
 footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
 
     oldreadme = f.read()
 
 header = oldreadme[:oldreadme.index('# OPTIONS')]
 footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
 
-options = helptext[helptext.index('  General Options:')+19:]
+options = helptext[helptext.index('  General Options:') + 19:]
 options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
 options = '# OPTIONS\n' + options + '\n'
 
 options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
 options = '# OPTIONS\n' + options + '\n'
 
-with open(README_FILE, 'w') as f:
+with io.open(README_FILE, 'w', encoding='utf-8') as f:
     f.write(header)
     f.write(options)
     f.write(footer)
     f.write(header)
     f.write(options)
     f.write(footer)
index 2766174c1a8477519eb818f287897ebc93d04a72..323acf8cfa92cc7662c21fac44795790867901f4 100755 (executable)
@@ -24,6 +24,8 @@ if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.0
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
 if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
 if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
+useless_files=$(find youtube_dl -type f -not -name '*.py')
+if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
 if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
 
 /bin/echo -e "\n### First of all, testing..."
 if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
 
 /bin/echo -e "\n### First of all, testing..."
index 8e24fe67918eeefa2f3f8b445ccfb480b8c841a8..1f45159cd3e641f2257abf6d8781ce9f31bbda11 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -71,7 +71,10 @@ setup(
     author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
     author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
-    packages=['youtube_dl', 'youtube_dl.extractor'],
+    packages=[
+        'youtube_dl',
+        'youtube_dl.extractor', 'youtube_dl.downloader',
+        'youtube_dl.postprocessor'],
 
     # Provokes warning on most systems (why?!)
     # test_suite = 'nose.collector',
 
     # Provokes warning on most systems (why?!)
     # test_suite = 'nose.collector',
index 3100c362aa6940d2c557dffb5cabb0f5564ef4a8..01de10e311865df805979e1f362f61ab25592a10 100644 (file)
@@ -8,6 +8,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import FakeYDL
 from youtube_dl import YoutubeDL
 
 from test.helper import FakeYDL
 from youtube_dl import YoutubeDL
+from youtube_dl.extractor import YoutubeIE
 
 
 class YDL(FakeYDL):
 
 
 class YDL(FakeYDL):
@@ -33,6 +34,8 @@ class TestFormatSelection(unittest.TestCase):
             {u'ext': u'mp4',  u'height': 460},
         ]
         info_dict = {u'formats': formats, u'extractor': u'test'}
             {u'ext': u'mp4',  u'height': 460},
         ]
         info_dict = {u'formats': formats, u'extractor': u'test'}
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'webm')
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'webm')
@@ -45,28 +48,46 @@ class TestFormatSelection(unittest.TestCase):
             {u'ext': u'mp4', u'height': 1080},
         ]
         info_dict[u'formats'] = formats
             {u'ext': u'mp4', u'height': 1080},
         ]
         info_dict[u'formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'mp4')
 
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'mp4')
 
-        # No prefer_free_formats => keep original formats order
+        # No prefer_free_formats => prefer mp4 and flv for greater compatibilty
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
             {u'ext': u'webm', u'height': 720},
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
             {u'ext': u'webm', u'height': 720},
+            {u'ext': u'mp4', u'height': 720},
             {u'ext': u'flv', u'height': 720},
         ]
         info_dict[u'formats'] = formats
             {u'ext': u'flv', u'height': 720},
         ]
         info_dict[u'formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded[u'ext'], u'mp4')
+
+        ydl = YDL()
+        ydl.params['prefer_free_formats'] = False
+        formats = [
+            {u'ext': u'flv', u'height': 720},
+            {u'ext': u'webm', u'height': 720},
+        ]
+        info_dict[u'formats'] = formats
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'flv')
 
     def test_format_limit(self):
         formats = [
         ydl.process_ie_result(info_dict)
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'ext'], u'flv')
 
     def test_format_limit(self):
         formats = [
-            {u'format_id': u'meh', u'url': u'http://example.com/meh'},
-            {u'format_id': u'good', u'url': u'http://example.com/good'},
-            {u'format_id': u'great', u'url': u'http://example.com/great'},
-            {u'format_id': u'excellent', u'url': u'http://example.com/exc'},
+            {u'format_id': u'meh', u'url': u'http://example.com/meh', 'preference': 1},
+            {u'format_id': u'good', u'url': u'http://example.com/good', 'preference': 2},
+            {u'format_id': u'great', u'url': u'http://example.com/great', 'preference': 3},
+            {u'format_id': u'excellent', u'url': u'http://example.com/exc', 'preference': 4},
         ]
         info_dict = {
             u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
         ]
         info_dict = {
             u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
@@ -78,12 +99,12 @@ class TestFormatSelection(unittest.TestCase):
 
         ydl = YDL({'format_limit': 'good'})
         assert ydl.params['format_limit'] == 'good'
 
         ydl = YDL({'format_limit': 'good'})
         assert ydl.params['format_limit'] == 'good'
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'good')
 
         ydl = YDL({'format_limit': 'great', 'format': 'all'})
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'good')
 
         ydl = YDL({'format_limit': 'great', 'format': 'all'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
         self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
         self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
         self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
         self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
         self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
@@ -91,44 +112,80 @@ class TestFormatSelection(unittest.TestCase):
 
         ydl = YDL()
         ydl.params['format_limit'] = 'excellent'
 
         ydl = YDL()
         ydl.params['format_limit'] = 'excellent'
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'excellent')
 
     def test_format_selection(self):
         formats = [
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded[u'format_id'], u'excellent')
 
     def test_format_selection(self):
         formats = [
-            {u'format_id': u'35', u'ext': u'mp4'},
-            {u'format_id': u'45', u'ext': u'webm'},
-            {u'format_id': u'47', u'ext': u'webm'},
-            {u'format_id': u'2', u'ext': u'flv'},
+            {u'format_id': u'35', u'ext': u'mp4', 'preference': 1},
+            {u'format_id': u'45', u'ext': u'webm', 'preference': 2},
+            {u'format_id': u'47', u'ext': u'webm', 'preference': 3},
+            {u'format_id': u'2', u'ext': u'flv', 'preference': 4},
         ]
         info_dict = {u'formats': formats, u'extractor': u'test'}
 
         ydl = YDL({'format': u'20/47'})
         ]
         info_dict = {u'formats': formats, u'extractor': u'test'}
 
         ydl = YDL({'format': u'20/47'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'47')
 
         ydl = YDL({'format': u'20/71/worst'})
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'47')
 
         ydl = YDL({'format': u'20/71/worst'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'35')
 
         ydl = YDL()
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'35')
 
         ydl = YDL()
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'2')
 
         ydl = YDL({'format': u'webm/mp4'})
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'2')
 
         ydl = YDL({'format': u'webm/mp4'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'47')
 
         ydl = YDL({'format': u'3gp/40/mp4'})
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'47')
 
         ydl = YDL({'format': u'3gp/40/mp4'})
-        ydl.process_ie_result(info_dict)
+        ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'35')
 
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], u'35')
 
+    def test_youtube_format_selection(self):
+        order = [
+            '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
+            # Apple HTTP Live Streaming
+            '96', '95', '94', '93', '92', '132', '151',
+            # 3D
+            '85', '84', '102', '83', '101', '82', '100',
+            # Dash video
+            '138', '137', '248', '136', '247', '135', '246',
+            '245', '244', '134', '243', '133', '242', '160',
+            # Dash audio
+            '141', '172', '140', '139', '171',
+        ]
+
+        for f1id, f2id in zip(order, order[1:]):
+            f1 = YoutubeIE._formats[f1id].copy()
+            f1['format_id'] = f1id
+            f2 = YoutubeIE._formats[f2id].copy()
+            f2['format_id'] = f2id
+
+            info_dict = {'formats': [f1, f2], 'extractor': 'youtube'}
+            ydl = YDL()
+            yie = YoutubeIE(ydl)
+            yie._sort_formats(info_dict['formats'])
+            ydl.process_ie_result(info_dict)
+            downloaded = ydl.downloaded_info_dicts[0]
+            self.assertEqual(downloaded['format_id'], f1id)
+
+            info_dict = {'formats': [f2, f1], 'extractor': 'youtube'}
+            ydl = YDL()
+            yie = YoutubeIE(ydl)
+            yie._sort_formats(info_dict['formats'])
+            ydl.process_ie_result(info_dict)
+            downloaded = ydl.downloaded_info_dicts[0]
+            self.assertEqual(downloaded['format_id'], f1id)
+
     def test_add_extra_info(self):
         test_dict = {
             'extractor': 'Foo',
     def test_add_extra_info(self):
         test_dict = {
             'extractor': 'Foo',
index bd77b7c30149d556caa1237b4be4c06a56adc613..75547f42a1fdd4bdccbe7f04398a4e071bd40064 100644 (file)
@@ -113,6 +113,8 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_vimeo_matching(self):
         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
         self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
     def test_vimeo_matching(self):
         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
         self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
+        self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])
 
     # https://github.com/rg3/youtube-dl/issues/1930
     def test_soundcloud_not_matching_sets(self):
 
     # https://github.com/rg3/youtube-dl/issues/1930
     def test_soundcloud_not_matching_sets(self):
index dd5818dba91c166936e45f1c7d8779c752fa3b86..0d925ae69bcf63e13ff35ec1df3c5de0c3b09b3c 100644 (file)
@@ -90,7 +90,7 @@ def generator(test_case):
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
-        ydl.fd.add_progress_hook(_hook)
+        ydl.add_progress_hook(_hook)
 
         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
@@ -148,7 +148,7 @@ def generator(test_case):
                     for key, value in info_dict.items()
                     if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
                 if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
                     for key, value in info_dict.items()
                     if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
                 if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
-                    sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n')
+                    sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
 
                 # Check for the presence of mandatory fields
                 for key in ('id', 'url', 'title', 'ext'):
 
                 # Check for the presence of mandatory fields
                 for key in ('id', 'url', 'title', 'ext'):
index 1b7b4e3d808cb936fa5fac07136049bd174a4490..5eeba091eefb5fcd3e4d4b57f94d97b79c91361d 100644 (file)
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # encoding: utf-8
 
 #!/usr/bin/env python
 # encoding: utf-8
 
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os
 
 # Allow direct execution
 import os
@@ -28,7 +29,10 @@ from youtube_dl.extractor import (
     BandcampAlbumIE,
     SmotriCommunityIE,
     SmotriUserIE,
     BandcampAlbumIE,
     SmotriCommunityIE,
     SmotriUserIE,
-    IviCompilationIE
+    IviCompilationIE,
+    ImdbListIE,
+    KhanAcademyIE,
+    EveryonesMixtapeIE,
 )
 
 
 )
 
 
@@ -42,7 +46,7 @@ class TestPlaylists(unittest.TestCase):
         ie = DailymotionPlaylistIE(dl)
         result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
         self.assertIsPlaylist(result)
         ie = DailymotionPlaylistIE(dl)
         result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'SPORT')
+        self.assertEqual(result['title'], 'SPORT')
         self.assertTrue(len(result['entries']) > 20)
 
     def test_dailymotion_user(self):
         self.assertTrue(len(result['entries']) > 20)
 
     def test_dailymotion_user(self):
@@ -50,7 +54,7 @@ class TestPlaylists(unittest.TestCase):
         ie = DailymotionUserIE(dl)
         result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
         self.assertIsPlaylist(result)
         ie = DailymotionUserIE(dl)
         result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Génération Quoi')
+        self.assertEqual(result['title'], 'Génération Quoi')
         self.assertTrue(len(result['entries']) >= 26)
 
     def test_vimeo_channel(self):
         self.assertTrue(len(result['entries']) >= 26)
 
     def test_vimeo_channel(self):
@@ -58,7 +62,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoChannelIE(dl)
         result = ie.extract('http://vimeo.com/channels/tributes')
         self.assertIsPlaylist(result)
         ie = VimeoChannelIE(dl)
         result = ie.extract('http://vimeo.com/channels/tributes')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Vimeo Tributes')
+        self.assertEqual(result['title'], 'Vimeo Tributes')
         self.assertTrue(len(result['entries']) > 24)
 
     def test_vimeo_user(self):
         self.assertTrue(len(result['entries']) > 24)
 
     def test_vimeo_user(self):
@@ -66,7 +70,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoUserIE(dl)
         result = ie.extract('http://vimeo.com/nkistudio/videos')
         self.assertIsPlaylist(result)
         ie = VimeoUserIE(dl)
         result = ie.extract('http://vimeo.com/nkistudio/videos')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Nki')
+        self.assertEqual(result['title'], 'Nki')
         self.assertTrue(len(result['entries']) > 65)
 
     def test_vimeo_album(self):
         self.assertTrue(len(result['entries']) > 65)
 
     def test_vimeo_album(self):
@@ -74,7 +78,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoAlbumIE(dl)
         result = ie.extract('http://vimeo.com/album/2632481')
         self.assertIsPlaylist(result)
         ie = VimeoAlbumIE(dl)
         result = ie.extract('http://vimeo.com/album/2632481')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Staff Favorites: November 2013')
+        self.assertEqual(result['title'], 'Staff Favorites: November 2013')
         self.assertTrue(len(result['entries']) > 12)
 
     def test_vimeo_groups(self):
         self.assertTrue(len(result['entries']) > 12)
 
     def test_vimeo_groups(self):
@@ -82,7 +86,7 @@ class TestPlaylists(unittest.TestCase):
         ie = VimeoGroupsIE(dl)
         result = ie.extract('http://vimeo.com/groups/rolexawards')
         self.assertIsPlaylist(result)
         ie = VimeoGroupsIE(dl)
         result = ie.extract('http://vimeo.com/groups/rolexawards')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Rolex Awards for Enterprise')
+        self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
         self.assertTrue(len(result['entries']) > 72)
 
     def test_ustream_channel(self):
         self.assertTrue(len(result['entries']) > 72)
 
     def test_ustream_channel(self):
@@ -90,7 +94,7 @@ class TestPlaylists(unittest.TestCase):
         ie = UstreamChannelIE(dl)
         result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
         self.assertIsPlaylist(result)
         ie = UstreamChannelIE(dl)
         result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'5124905')
+        self.assertEqual(result['id'], '5124905')
         self.assertTrue(len(result['entries']) >= 11)
 
     def test_soundcloud_set(self):
         self.assertTrue(len(result['entries']) >= 11)
 
     def test_soundcloud_set(self):
@@ -98,7 +102,7 @@ class TestPlaylists(unittest.TestCase):
         ie = SoundcloudSetIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
         self.assertIsPlaylist(result)
         ie = SoundcloudSetIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'The Royal Concept EP')
+        self.assertEqual(result['title'], 'The Royal Concept EP')
         self.assertTrue(len(result['entries']) >= 6)
 
     def test_soundcloud_user(self):
         self.assertTrue(len(result['entries']) >= 6)
 
     def test_soundcloud_user(self):
@@ -106,7 +110,7 @@ class TestPlaylists(unittest.TestCase):
         ie = SoundcloudUserIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band')
         self.assertIsPlaylist(result)
         ie = SoundcloudUserIE(dl)
         result = ie.extract('https://soundcloud.com/the-concept-band')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'9615865')
+        self.assertEqual(result['id'], '9615865')
         self.assertTrue(len(result['entries']) >= 12)
 
     def test_livestream_event(self):
         self.assertTrue(len(result['entries']) >= 12)
 
     def test_livestream_event(self):
@@ -114,7 +118,7 @@ class TestPlaylists(unittest.TestCase):
         ie = LivestreamIE(dl)
         result = ie.extract('http://new.livestream.com/tedx/cityenglish')
         self.assertIsPlaylist(result)
         ie = LivestreamIE(dl)
         result = ie.extract('http://new.livestream.com/tedx/cityenglish')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'TEDCity2.0 (English)')
+        self.assertEqual(result['title'], 'TEDCity2.0 (English)')
         self.assertTrue(len(result['entries']) >= 4)
 
     def test_nhl_videocenter(self):
         self.assertTrue(len(result['entries']) >= 4)
 
     def test_nhl_videocenter(self):
@@ -122,8 +126,8 @@ class TestPlaylists(unittest.TestCase):
         ie = NHLVideocenterIE(dl)
         result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
         self.assertIsPlaylist(result)
         ie = NHLVideocenterIE(dl)
         result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'999')
-        self.assertEqual(result['title'], u'Highlights')
+        self.assertEqual(result['id'], '999')
+        self.assertEqual(result['title'], 'Highlights')
         self.assertEqual(len(result['entries']), 12)
 
     def test_bambuser_channel(self):
         self.assertEqual(len(result['entries']), 12)
 
     def test_bambuser_channel(self):
@@ -131,7 +135,7 @@ class TestPlaylists(unittest.TestCase):
         ie = BambuserChannelIE(dl)
         result = ie.extract('http://bambuser.com/channel/pixelversity')
         self.assertIsPlaylist(result)
         ie = BambuserChannelIE(dl)
         result = ie.extract('http://bambuser.com/channel/pixelversity')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'pixelversity')
+        self.assertEqual(result['title'], 'pixelversity')
         self.assertTrue(len(result['entries']) >= 60)
 
     def test_bandcamp_album(self):
         self.assertTrue(len(result['entries']) >= 60)
 
     def test_bandcamp_album(self):
@@ -139,7 +143,7 @@ class TestPlaylists(unittest.TestCase):
         ie = BandcampAlbumIE(dl)
         result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
         self.assertIsPlaylist(result)
         ie = BandcampAlbumIE(dl)
         result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], u'Nightmare Night EP')
+        self.assertEqual(result['title'], 'Nightmare Night EP')
         self.assertTrue(len(result['entries']) >= 4)
         
     def test_smotri_community(self):
         self.assertTrue(len(result['entries']) >= 4)
         
     def test_smotri_community(self):
@@ -147,8 +151,8 @@ class TestPlaylists(unittest.TestCase):
         ie = SmotriCommunityIE(dl)
         result = ie.extract('http://smotri.com/community/video/kommuna')
         self.assertIsPlaylist(result)
         ie = SmotriCommunityIE(dl)
         result = ie.extract('http://smotri.com/community/video/kommuna')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'kommuna')
-        self.assertEqual(result['title'], u'КПРФ')
+        self.assertEqual(result['id'], 'kommuna')
+        self.assertEqual(result['title'], 'КПРФ')
         self.assertTrue(len(result['entries']) >= 4)
         
     def test_smotri_user(self):
         self.assertTrue(len(result['entries']) >= 4)
         
     def test_smotri_user(self):
@@ -156,17 +160,17 @@ class TestPlaylists(unittest.TestCase):
         ie = SmotriUserIE(dl)
         result = ie.extract('http://smotri.com/user/inspector')
         self.assertIsPlaylist(result)
         ie = SmotriUserIE(dl)
         result = ie.extract('http://smotri.com/user/inspector')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'inspector')
-        self.assertEqual(result['title'], u'Inspector')
+        self.assertEqual(result['id'], 'inspector')
+        self.assertEqual(result['title'], 'Inspector')
         self.assertTrue(len(result['entries']) >= 9)
 
     def test_AcademicEarthCourse(self):
         dl = FakeYDL()
         ie = AcademicEarthCourseIE(dl)
-        result = ie.extract(u'http://academicearth.org/courses/building-dynamic-websites/')
+        result = ie.extract('http://academicearth.org/courses/building-dynamic-websites/')
         self.assertIsPlaylist(result)
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'building-dynamic-websites')
-        self.assertEqual(result['title'], u'Building Dynamic Websites')
+        self.assertEqual(result['id'], 'building-dynamic-websites')
+        self.assertEqual(result['title'], 'Building Dynamic Websites')
         self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
         self.assertEqual(len(result['entries']), 10)
         
@@ -175,8 +179,8 @@ class TestPlaylists(unittest.TestCase):
         ie = IviCompilationIE(dl)
         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel')
         self.assertIsPlaylist(result)
         ie = IviCompilationIE(dl)
         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'dezhurnyi_angel')
-        self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012)')
+        self.assertEqual(result['id'], 'dezhurnyi_angel')
+        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
         self.assertTrue(len(result['entries']) >= 36)
         
     def test_ivi_compilation_season(self):
@@ -184,9 +188,37 @@ class TestPlaylists(unittest.TestCase):
         ie = IviCompilationIE(dl)
         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
         self.assertIsPlaylist(result)
         ie = IviCompilationIE(dl)
         result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], u'dezhurnyi_angel/season2')
-        self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012) 2 сезон')
+        self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
+        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
         self.assertTrue(len(result['entries']) >= 20)
+        
+    def test_imdb_list(self):
+        dl = FakeYDL()
+        ie = ImdbListIE(dl)
+        result = ie.extract('http://www.imdb.com/list/sMjedvGDd8U')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'sMjedvGDd8U')
+        self.assertEqual(result['title'], 'Animated and Family Films')
+        self.assertTrue(len(result['entries']) >= 48)
+
+    def test_khanacademy_topic(self):
+        dl = FakeYDL()
+        ie = KhanAcademyIE(dl)
+        result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'cryptography')
+        self.assertEqual(result['title'], 'Journey into cryptography')
+        self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
+        self.assertTrue(len(result['entries']) >= 3)
+
+    def test_EveryonesMixtape(self):
+        dl = FakeYDL()
+        ie = EveryonesMixtapeIE(dl)
+        result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'm7m0jJAbMQi')
+        self.assertEqual(result['title'], 'Driving')
+        self.assertEqual(len(result['entries']), 24)
 
 
 if __name__ == '__main__':
index 23a6531248ccd162155d61bcc3ac5621fb08a8f2..1e4e62faae69da625867d3cf5d2047e397808727 100644 (file)
@@ -36,10 +36,6 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
 
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
 
-    def getSubtitles(self):
-        info_dict = self.getInfoDict()
-        return info_dict[0]['subtitles']
-
     def test_youtube_no_writesubtitles(self):
         self.DL.params['writesubtitles'] = False
         subtitles = self.getSubtitles()
@@ -171,13 +167,13 @@ class TestTedSubtitles(BaseTestSubtitles):
     def test_subtitles(self):
         self.DL.params['writesubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d')
+        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
 
     def test_subtitles_lang(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['subtitleslangs'] = ['fr']
         subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6')
+        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
 
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
diff --git a/test/test_unicode_literals.py b/test/test_unicode_literals.py
new file mode 100644 (file)
index 0000000..a4ba7ba
--- /dev/null
@@ -0,0 +1,47 @@
+from __future__ import unicode_literals
+
+import io
+import os
+import re
+import unittest
+
+rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+IGNORED_FILES = [
+    'setup.py',  # http://bugs.python.org/issue13943
+]
+
+
+class TestUnicodeLiterals(unittest.TestCase):
+    def test_all_files(self):
+        print('Skipping this test (not yet fully implemented)')
+        return
+
+        for dirpath, _, filenames in os.walk(rootDir):
+            for basename in filenames:
+                if not basename.endswith('.py'):
+                    continue
+                if basename in IGNORED_FILES:
+                    continue
+
+                fn = os.path.join(dirpath, basename)
+                with io.open(fn, encoding='utf-8') as inf:
+                    code = inf.read()
+
+                if "'" not in code and '"' not in code:
+                    continue
+                imps = 'from __future__ import unicode_literals'
+                self.assertTrue(
+                    imps in code,
+                    ' %s  missing in %s' % (imps, fn))
+
+                m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
+                if m is not None:
+                    self.assertTrue(
+                        m is None,
+                        'u present in %s, around %s' % (
+                            fn, code[m.start() - 10:m.end() + 10]))
+
+
+if __name__ == '__main__':
+    unittest.main()
index e5778cd83ee9ea74e4786243f1e6279aed3697d3..bee355ee0e0605a5134dc37b8556e9e233728902 100644 (file)
@@ -18,6 +18,7 @@ from youtube_dl.utils import (
     find_xpath_attr,
     get_meta_content,
     orderedSet,
+    parse_duration,
     sanitize_filename,
     shell_quote,
     smuggle_url,
@@ -192,5 +193,12 @@ class TestUtil(unittest.TestCase):
             url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
             u'trailer.mp4')
 
+    def test_parse_duration(self):
+        self.assertEqual(parse_duration(None), None)
+        self.assertEqual(parse_duration('1'), 1)
+        self.assertEqual(parse_duration('1337:12'), 80232)
+        self.assertEqual(parse_duration('9:12:43'), 33163)
+        self.assertEqual(parse_duration('x:y'), None)
+
 if __name__ == '__main__':
     unittest.main()
index 056700614b43fa0a3dbceeb82ef991e34fdb53f9..a3fc53047de031c8c8e01046b53a891ef6b693a8 100644 (file)
@@ -27,12 +27,6 @@ _TESTS = [
         85,
         u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
         85,
         u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
     ),
-    (
-        u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf',
-        u'swf',
-        82,
-        u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321'
-    ),
 ]
 
 
 ]
 
 
index 2e3e8a9b6cf644d8b656750bfe3386298e6609d3..1e06aaad529038d248e799b88bb24d209941183a 100755 (executable)
Binary files a/youtube-dl and b/youtube-dl differ
index c99538c06871d2bd1765f1d50994d43c493dc2c6..ce1c4f926282db80cf04c743dfd5e2232a11b32e 100644 (file)
@@ -1,4 +1,4 @@
-.TH YOUTUBE\-DL 1 "" 
+.TH "YOUTUBE\-DL" "1" "" "" ""
 .SH NAME
 .PP
 youtube\-dl \- download videos from youtube.com or other video platforms
 .SH NAME
 .PP
 youtube\-dl \- download videos from youtube.com or other video platforms
@@ -38,12 +38,16 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ empty\ string\ (\-\-proxy\ "")\ for\ direct\ connection
 \-\-no\-check\-certificate\ \ \ \ \ Suppress\ HTTPS\ certificate\ validation.
 \-\-cache\-dir\ DIR\ \ \ \ \ \ \ \ \ \ \ \ Location\ in\ the\ filesystem\ where\ youtube\-dl\ can
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ empty\ string\ (\-\-proxy\ "")\ for\ direct\ connection
 \-\-no\-check\-certificate\ \ \ \ \ Suppress\ HTTPS\ certificate\ validation.
 \-\-cache\-dir\ DIR\ \ \ \ \ \ \ \ \ \ \ \ Location\ in\ the\ filesystem\ where\ youtube\-dl\ can
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ store\ downloaded\ information\ permanently.\ By
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ store\ some\ downloaded\ information\ permanently.\ By
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ default\ $XDG_CACHE_HOME/youtube\-dl\ or\ ~/.cache
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ default\ $XDG_CACHE_HOME/youtube\-dl\ or\ ~/.cache
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /youtube\-dl\ .
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /youtube\-dl\ .\ At\ the\ moment,\ only\ YouTube\ player
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ files\ (for\ videos\ with\ obfuscated\ signatures)\ are
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ cached,\ but\ that\ may\ change.
 \-\-no\-cache\-dir\ \ \ \ \ \ \ \ \ \ \ \ \ Disable\ filesystem\ caching
 \-\-no\-cache\-dir\ \ \ \ \ \ \ \ \ \ \ \ \ Disable\ filesystem\ caching
+\-\-socket\-timeout\ None\ \ \ \ \ \ Time\ to\ wait\ before\ giving\ up,\ in\ seconds
 \-\-bidi\-workaround\ \ \ \ \ \ \ \ \ \ Work\ around\ terminals\ that\ lack\ bidirectional
 \-\-bidi\-workaround\ \ \ \ \ \ \ \ \ \ Work\ around\ terminals\ that\ lack\ bidirectional
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ text\ support.\ Requires\ fribidi\ executable\ in\ PATH
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ text\ support.\ Requires\ bidiv\ or\ fribidi
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ executable\ in\ PATH
 \f[]
 .fi
 .SS Video Selection:
 \f[]
 .fi
 .SS Video Selection:
@@ -62,8 +66,10 @@ redistribute it or use it however you like.
 \-\-max\-filesize\ SIZE\ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ larger\ than\ SIZE\ (e.g.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 50k\ or\ 44.6m)
 \-\-date\ DATE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ in\ this\ date
 \-\-max\-filesize\ SIZE\ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ larger\ than\ SIZE\ (e.g.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 50k\ or\ 44.6m)
 \-\-date\ DATE\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ in\ this\ date
-\-\-datebefore\ DATE\ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ before\ this\ date
-\-\-dateafter\ DATE\ \ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ after\ this\ date
+\-\-datebefore\ DATE\ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ on\ or\ before\ this
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ date\ (i.e.\ inclusive)
+\-\-dateafter\ DATE\ \ \ \ \ \ \ \ \ \ \ download\ only\ videos\ uploaded\ on\ or\ after\ this
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ date\ (i.e.\ inclusive)
 \-\-min\-views\ COUNT\ \ \ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ with\ less\ than\ COUNT
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ views
 \-\-max\-views\ COUNT\ \ \ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ with\ more\ than\ COUNT
 \-\-min\-views\ COUNT\ \ \ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ with\ less\ than\ COUNT
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ views
 \-\-max\-views\ COUNT\ \ \ \ \ \ \ \ \ \ Do\ not\ download\ any\ videos\ with\ more\ than\ COUNT
@@ -103,13 +109,13 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ different,\ %(autonumber)s\ to\ get\ an\ automatically
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ incremented\ number,\ %(ext)s\ for\ the\ filename
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ extension,\ %(format)s\ for\ the\ format\ description
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ different,\ %(autonumber)s\ to\ get\ an\ automatically
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ incremented\ number,\ %(ext)s\ for\ the\ filename
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ extension,\ %(format)s\ for\ the\ format\ description
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (like\ "22\ \-\ 1280x720"\ or\ "HD"),%(format_id)s\ for
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (like\ "22\ \-\ 1280x720"\ or\ "HD"),%(format_id)s\ for
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ unique\ id\ of\ the\ format\ (like\ Youtube\[aq]s
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ unique\ id\ of\ the\ format\ (like\ Youtube\[aq]s
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ itags:\ "137"),%(upload_date)s\ for\ the\ upload\ date
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (YYYYMMDD),\ %(extractor)s\ for\ the\ provider
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (youtube,\ metacafe,\ etc),\ %(id)s\ for\ the\ video\ id
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ,\ %(playlist)s\ for\ the\ playlist\ the\ video\ is\ in,
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ %(playlist_index)s\ for\ the\ position\ in\ the
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ itags:\ "137"),\ %(upload_date)s\ for\ the\ upload
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ date\ (YYYYMMDD),\ %(extractor)s\ for\ the\ provider
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (youtube,\ metacafe,\ etc),\ %(id)s\ for\ the\ video
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ id,\ %(playlist)s\ for\ the\ playlist\ the\ video\ is
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ in,\ %(playlist_index)s\ for\ the\ position\ in\ the
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ playlist\ and\ %%\ for\ a\ literal\ percent.\ Use\ \-\ to
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ output\ to\ stdout.\ Can\ also\ be\ used\ to\ download\ to
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ a\ different\ directory,\ for\ example\ with\ \-o\ \[aq]/my/d
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ playlist\ and\ %%\ for\ a\ literal\ percent.\ Use\ \-\ to
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ output\ to\ stdout.\ Can\ also\ be\ used\ to\ download\ to
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ a\ different\ directory,\ for\ example\ with\ \-o\ \[aq]/my/d
@@ -121,7 +127,7 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ avoid\ "&"\ and\ spaces\ in\ filenames
 \-a,\ \-\-batch\-file\ FILE\ \ \ \ \ \ file\ containing\ URLs\ to\ download\ (\[aq]\-\[aq]\ for\ stdin)
 \-\-load\-info\ FILE\ \ \ \ \ \ \ \ \ \ \ json\ file\ containing\ the\ video\ information
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ avoid\ "&"\ and\ spaces\ in\ filenames
 \-a,\ \-\-batch\-file\ FILE\ \ \ \ \ \ file\ containing\ URLs\ to\ download\ (\[aq]\-\[aq]\ for\ stdin)
 \-\-load\-info\ FILE\ \ \ \ \ \ \ \ \ \ \ json\ file\ containing\ the\ video\ information
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (created\ with\ the\ "\-\-write\-json"\ option
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (created\ with\ the\ "\-\-write\-json"\ option)
 \-w,\ \-\-no\-overwrites\ \ \ \ \ \ \ \ do\ not\ overwrite\ files
 \-c,\ \-\-continue\ \ \ \ \ \ \ \ \ \ \ \ \ force\ resume\ of\ partially\ downloaded\ files.\ By
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ default,\ youtube\-dl\ will\ resume\ downloads\ if
 \-w,\ \-\-no\-overwrites\ \ \ \ \ \ \ \ do\ not\ overwrite\ files
 \-c,\ \-\-continue\ \ \ \ \ \ \ \ \ \ \ \ \ force\ resume\ of\ partially\ downloaded\ files.\ By
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ default,\ youtube\-dl\ will\ resume\ downloads\ if
@@ -159,7 +165,7 @@ redistribute it or use it however you like.
 \-\-no\-progress\ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ print\ progress\ bar
 \-\-console\-title\ \ \ \ \ \ \ \ \ \ \ \ display\ progress\ in\ console\ titlebar
 \-v,\ \-\-verbose\ \ \ \ \ \ \ \ \ \ \ \ \ \ print\ various\ debugging\ information
 \-\-no\-progress\ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ print\ progress\ bar
 \-\-console\-title\ \ \ \ \ \ \ \ \ \ \ \ display\ progress\ in\ console\ titlebar
 \-v,\ \-\-verbose\ \ \ \ \ \ \ \ \ \ \ \ \ \ print\ various\ debugging\ information
-\-\-dump\-intermediate\-pages\ \ print\ downloaded\ pages\ to\ debug\ problems(very
+\-\-dump\-intermediate\-pages\ \ print\ downloaded\ pages\ to\ debug\ problems(very
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ verbose)
 \-\-write\-pages\ \ \ \ \ \ \ \ \ \ \ \ \ \ Write\ downloaded\ intermediary\ pages\ to\ files\ in
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ current\ directory\ to\ debug\ problems
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ verbose)
 \-\-write\-pages\ \ \ \ \ \ \ \ \ \ \ \ \ \ Write\ downloaded\ intermediary\ pages\ to\ files\ in
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ current\ directory\ to\ debug\ problems
@@ -176,8 +182,7 @@ redistribute it or use it however you like.
 \-\-prefer\-free\-formats\ \ \ \ \ \ prefer\ free\ video\ formats\ unless\ a\ specific\ one
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ is\ requested
 \-\-max\-quality\ FORMAT\ \ \ \ \ \ \ highest\ quality\ format\ to\ download
 \-\-prefer\-free\-formats\ \ \ \ \ \ prefer\ free\ video\ formats\ unless\ a\ specific\ one
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ is\ requested
 \-\-max\-quality\ FORMAT\ \ \ \ \ \ \ highest\ quality\ format\ to\ download
-\-F,\ \-\-list\-formats\ \ \ \ \ \ \ \ \ list\ all\ available\ formats\ (currently\ youtube
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ only)
+\-F,\ \-\-list\-formats\ \ \ \ \ \ \ \ \ list\ all\ available\ formats
 \f[]
 .fi
 .SS Subtitle Options:
 \f[]
 .fi
 .SS Subtitle Options:
@@ -203,7 +208,7 @@ redistribute it or use it however you like.
 \-u,\ \-\-username\ USERNAME\ \ \ \ account\ username
 \-p,\ \-\-password\ PASSWORD\ \ \ \ account\ password
 \-n,\ \-\-netrc\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ .netrc\ authentication\ data
 \-u,\ \-\-username\ USERNAME\ \ \ \ account\ username
 \-p,\ \-\-password\ PASSWORD\ \ \ \ account\ password
 \-n,\ \-\-netrc\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ .netrc\ authentication\ data
-\-\-video\-password\ PASSWORD\ \ video\ password\ (vimeo\ only)
+\-\-video\-password\ PASSWORD\ \ video\ password\ (vimeo,\ smotri)
 \f[]
 .fi
 .SS Post\-processing Options:
 \f[]
 .fi
 .SS Post\-processing Options:
@@ -225,7 +230,13 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ processed\ files\ are\ overwritten\ by\ default
 \-\-embed\-subs\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ embed\ subtitles\ in\ the\ video\ (only\ for\ mp4
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ videos)
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ processed\ files\ are\ overwritten\ by\ default
 \-\-embed\-subs\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ embed\ subtitles\ in\ the\ video\ (only\ for\ mp4
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ videos)
-\-\-add\-metadata\ \ \ \ \ \ \ \ \ \ \ \ \ add\ metadata\ to\ the\ files
+\-\-add\-metadata\ \ \ \ \ \ \ \ \ \ \ \ \ write\ metadata\ to\ the\ video\ file
+\-\-xattrs\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ write\ metadata\ to\ the\ video\ file\[aq]s\ xattrs\ (using
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ dublin\ core\ and\ xdg\ standards)
+\-\-prefer\-avconv\ \ \ \ \ \ \ \ \ \ \ \ Prefer\ avconv\ over\ ffmpeg\ for\ running\ the
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ postprocessors\ (default)
+\-\-prefer\-ffmpeg\ \ \ \ \ \ \ \ \ \ \ \ Prefer\ ffmpeg\ over\ avconv\ for\ running\ the
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ postprocessors
 \f[]
 .fi
 .SH CONFIGURATION
 \f[]
 .fi
 .SH CONFIGURATION
@@ -306,14 +317,12 @@ Relative dates: Dates in the format
 \f[C](now|today)[+\-][0\-9](day|week|month|year)(s)?\f[]
 .PP
 Examples:
 \f[C](now|today)[+\-][0\-9](day|week|month|year)(s)?\f[]
 .PP
 Examples:
-.IP
-.nf
-\f[C]
-$\ youtube\-dl\ \-\-dateafter\ now\-6months\ #will\ only\ download\ the\ videos\ uploaded\ in\ the\ last\ 6\ months
-$\ youtube\-dl\ \-\-date\ 19700101\ #will\ only\ download\ the\ videos\ uploaded\ in\ January\ 1,\ 1970
-$\ youtube\-dl\ \-\-dateafter\ 20000101\ \-\-datebefore\ 20100101\ #will\ only\ download\ the\ videos\ uploaded\ between\ 2000\ and\ 2010
-\f[]
-.fi
+.PP
+$ # Download only the videos uploaded in the last 6 months $ youtube\-dl
+\-\-dateafter now\-6months $ # Download only the videos uploaded on
+January 1, 1970 $ youtube\-dl \-\-date 19700101 $ # will only download
+the videos uploaded in the 200x decade $ youtube\-dl \-\-dateafter
+20000101 \-\-datebefore 20091231
 .SH FAQ
 .SS Can you please put the \-b option back?
 .PP
 .SH FAQ
 .SS Can you please put the \-b option back?
 .PP
@@ -451,7 +460,7 @@ http://www.youtube.com/ ) is \f[I]not\f[] an example URL.
 .PP
 Before reporting any issue, type youtube\-dl \-U.
 This should report that you\[aq]re up\-to\-date.
 .PP
 Before reporting any issue, type youtube\-dl \-U.
 This should report that you\[aq]re up\-to\-date.
-Ábout 20% of the reports we receive are already fixed, but people are
+About 20% of the reports we receive are already fixed, but people are
 using outdated versions.
 This goes for feature requests as well.
 .SS Is the issue already documented?
 using outdated versions.
 This goes for feature requests as well.
 .SS Is the issue already documented?
@@ -521,3 +530,14 @@ talk to) require.
 Do not post features because they seem like a good idea.
 If they are really useful, they will be requested by someone who
 requires them.
 Do not post features because they seem like a good idea.
 If they are really useful, they will be requested by someone who
 requires them.
+.SS Is your question about youtube\-dl?
+.PP
+It may sound strange, but some bug reports we receive are completely
+unrelated to youtube\-dl and relate to a different or even the
+reporter\[aq]s own application.
+Please make sure that you are actually using youtube\-dl.
+If you are using a UI for youtube\-dl, report the bug to the maintainer
+of the actual application providing the UI.
+On the other hand, if your UI for youtube\-dl fails in some way you
+believe is related to youtube\-dl, by all means, go ahead and report the
+bug.
index 0cb7e92759b45c92fe1e10c9393a2c87f8e8044e..63462859ae7cec9fe05ee96ba1a5e51c94ac2abc 100644 (file)
@@ -4,9 +4,9 @@ __youtube_dl()
     COMPREPLY=()
     cur="${COMP_WORDS[COMP_CWORD]}"
     prev="${COMP_WORDS[COMP_CWORD-1]}"
     COMPREPLY=()
     cur="${COMP_WORDS[COMP_CWORD]}"
     prev="${COMP_WORDS[COMP_CWORD-1]}"
-    opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --user-agent --referer --list-extractors --extractor-descriptions --proxy --no-check-certificate --cache-dir --no-cache-dir --socket-timeout --bidi-workaround --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --rate-limit --retries --buffer-size --no-resize-buffer --test --title --id --literal --auto-number --output --autonumber-size --restrict-filenames --batch-file --load-info --no-overwrites --continue --no-continue --cookies --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --quiet --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --format --all-formats --prefer-free-formats --max-quality --list-formats --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --add-metadata"
+    opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --user-agent --referer --list-extractors --extractor-descriptions --proxy --no-check-certificate --cache-dir --no-cache-dir --socket-timeout --bidi-workaround --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --rate-limit --retries --buffer-size --no-resize-buffer --test --title --id --literal --auto-number --output --autonumber-size --restrict-filenames --batch-file --load-info --no-overwrites --continue --no-continue --cookies --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --quiet --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --format --all-formats --prefer-free-formats --max-quality --list-formats --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg"
     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
     keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
-    fileopts="-a|--batch-file|--download-archive|--cookies"
+    fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
     diropts="--cache-dir"
 
     if [[ ${prev} =~ ${fileopts} ]]; then
     diropts="--cache-dir"
 
     if [[ ${prev} =~ ${fileopts} ]]; then
index 47124932fc7e9ff3c40ec29d003757cdb20cf967..5c8e676a20bdb112452e2a8b0815422ca3bf7c73 100644 (file)
-import os
-import re
-import subprocess
-import sys
-import time
-
-from .utils import (
-    compat_urllib_error,
-    compat_urllib_request,
-    ContentTooShortError,
-    determine_ext,
-    encodeFilename,
-    format_bytes,
-    sanitize_open,
-    timeconvert,
-)
-
-
-class FileDownloader(object):
-    """File Downloader class.
-
-    File downloader objects are the ones responsible of downloading the
-    actual video file and writing it to disk.
-
-    File downloaders accept a lot of parameters. In order not to saturate
-    the object constructor with arguments, it receives a dictionary of
-    options instead.
-
-    Available options:
-
-    verbose:           Print additional info to stdout.
-    quiet:             Do not print messages to stdout.
-    ratelimit:         Download speed limit, in bytes/sec.
-    retries:           Number of times to retry for HTTP error 5xx
-    buffersize:        Size of download buffer in bytes.
-    noresizebuffer:    Do not automatically resize the download buffer.
-    continuedl:        Try to continue downloads if possible.
-    noprogress:        Do not print the progress bar.
-    logtostderr:       Log messages to stderr instead of stdout.
-    consoletitle:      Display progress in console window's titlebar.
-    nopart:            Do not use temporary .part files.
-    updatetime:        Use the Last-modified header to set output file timestamps.
-    test:              Download only first bytes to test the downloader.
-    min_filesize:      Skip files smaller than this size
-    max_filesize:      Skip files larger than this size
-    """
-
-    params = None
-
-    def __init__(self, ydl, params):
-        """Create a FileDownloader object with the given options."""
-        self.ydl = ydl
-        self._progress_hooks = []
-        self.params = params
-
-    @staticmethod
-    def format_seconds(seconds):
-        (mins, secs) = divmod(seconds, 60)
-        (hours, mins) = divmod(mins, 60)
-        if hours > 99:
-            return '--:--:--'
-        if hours == 0:
-            return '%02d:%02d' % (mins, secs)
-        else:
-            return '%02d:%02d:%02d' % (hours, mins, secs)
-
-    @staticmethod
-    def calc_percent(byte_counter, data_len):
-        if data_len is None:
-            return None
-        return float(byte_counter) / float(data_len) * 100.0
-
-    @staticmethod
-    def format_percent(percent):
-        if percent is None:
-            return '---.-%'
-        return '%6s' % ('%3.1f%%' % percent)
-
-    @staticmethod
-    def calc_eta(start, now, total, current):
-        if total is None:
-            return None
-        dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
-            return None
-        rate = float(current) / dif
-        return int((float(total) - float(current)) / rate)
-
-    @staticmethod
-    def format_eta(eta):
-        if eta is None:
-            return '--:--'
-        return FileDownloader.format_seconds(eta)
-
-    @staticmethod
-    def calc_speed(start, now, bytes):
-        dif = now - start
-        if bytes == 0 or dif < 0.001: # One millisecond
-            return None
-        return float(bytes) / dif
-
-    @staticmethod
-    def format_speed(speed):
-        if speed is None:
-            return '%10s' % '---b/s'
-        return '%10s' % ('%s/s' % format_bytes(speed))
-
-    @staticmethod
-    def best_block_size(elapsed_time, bytes):
-        new_min = max(bytes / 2.0, 1.0)
-        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
-        if elapsed_time < 0.001:
-            return int(new_max)
-        rate = bytes / elapsed_time
-        if rate > new_max:
-            return int(new_max)
-        if rate < new_min:
-            return int(new_min)
-        return int(rate)
-
-    @staticmethod
-    def parse_bytes(bytestr):
-        """Parse a string indicating a byte quantity into an integer."""
-        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
-        if matchobj is None:
-            return None
-        number = float(matchobj.group(1))
-        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
-        return int(round(number * multiplier))
-
-    def to_screen(self, *args, **kargs):
-        self.ydl.to_screen(*args, **kargs)
-
-    def to_stderr(self, message):
-        self.ydl.to_screen(message)
-
-    def to_console_title(self, message):
-        self.ydl.to_console_title(message)
-
-    def trouble(self, *args, **kargs):
-        self.ydl.trouble(*args, **kargs)
-
-    def report_warning(self, *args, **kargs):
-        self.ydl.report_warning(*args, **kargs)
-
-    def report_error(self, *args, **kargs):
-        self.ydl.report_error(*args, **kargs)
-
-    def slow_down(self, start_time, byte_counter):
-        """Sleep if the download speed is over the rate limit."""
-        rate_limit = self.params.get('ratelimit', None)
-        if rate_limit is None or byte_counter == 0:
-            return
-        now = time.time()
-        elapsed = now - start_time
-        if elapsed <= 0.0:
-            return
-        speed = float(byte_counter) / elapsed
-        if speed > rate_limit:
-            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
-
-    def temp_name(self, filename):
-        """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
-                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
-            return filename
-        return filename + u'.part'
-
-    def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
-        return filename
-
-    def try_rename(self, old_filename, new_filename):
-        try:
-            if old_filename == new_filename:
-                return
-            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
-        except (IOError, OSError):
-            self.report_error(u'unable to rename file')
-
-    def try_utime(self, filename, last_modified_hdr):
-        """Try to set the last-modified time of the given file."""
-        if last_modified_hdr is None:
-            return
-        if not os.path.isfile(encodeFilename(filename)):
-            return
-        timestr = last_modified_hdr
-        if timestr is None:
-            return
-        filetime = timeconvert(timestr)
-        if filetime is None:
-            return filetime
-        # Ignore obviously invalid dates
-        if filetime == 0:
-            return
-        try:
-            os.utime(filename, (time.time(), filetime))
-        except:
-            pass
-        return filetime
-
-    def report_destination(self, filename):
-        """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
-
-    def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
-        if self.params.get('progress_with_newline', False):
-            self.to_screen(fullmsg)
-        else:
-            if os.name == 'nt':
-                prev_len = getattr(self, '_report_progress_prev_line_length',
-                                   0)
-                if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
-                self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
-            else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
-            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
-
-    def report_progress(self, percent, data_len_str, speed, eta):
-        """Report download progress."""
-        if self.params.get('noprogress', False):
-            return
-        if eta is not None:
-            eta_str = self.format_eta(eta)
-        else:
-            eta_str = 'Unknown ETA'
-        if percent is not None:
-            percent_str = self.format_percent(percent)
-        else:
-            percent_str = 'Unknown %'
-        speed_str = self.format_speed(speed)
-
-        msg = (u'%s of %s at %s ETA %s' %
-               (percent_str, data_len_str, speed_str, eta_str))
-        self._report_progress_status(msg)
-
-    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
-        if self.params.get('noprogress', False):
-            return
-        downloaded_str = format_bytes(downloaded_data_len)
-        speed_str = self.format_speed(speed)
-        elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
-        self._report_progress_status(msg)
-
-    def report_finish(self, data_len_str, tot_time):
-        """Report download finished."""
-        if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
-        else:
-            self._report_progress_status(
-                (u'100%% of %s in %s' %
-                 (data_len_str, self.format_seconds(tot_time))),
-                is_last_line=True)
-
-    def report_resuming_byte(self, resume_len):
-        """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
-
-    def report_retry(self, count, retries):
-        """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
-
-    def report_file_already_downloaded(self, file_name):
-        """Report file has already been fully downloaded."""
-        try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
-        except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
-
-    def report_unable_to_resume(self):
-        """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
-
-    def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url, live, conn):
-        def run_rtmpdump(args):
-            start = time.time()
-            resume_percent = None
-            resume_downloaded_data_len = None
-            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
-            cursor_in_new_line = True
-            proc_stderr_closed = False
-            while not proc_stderr_closed:
-                # read line from stderr
-                line = u''
-                while True:
-                    char = proc.stderr.read(1)
-                    if not char:
-                        proc_stderr_closed = True
-                        break
-                    if char in [b'\r', b'\n']:
-                        break
-                    line += char.decode('ascii', 'replace')
-                if not line:
-                    # proc_stderr_closed is True
-                    continue
-                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
-                if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
-                    percent = float(mobj.group(2))
-                    if not resume_percent:
-                        resume_percent = percent
-                        resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
-                    data_len = None
-                    if percent > 0:
-                        data_len = int(downloaded_data_len * 100 / percent)
-                    data_len_str = u'~' + format_bytes(data_len)
-                    self.report_progress(percent, data_len_str, speed, eta)
-                    cursor_in_new_line = False
-                    self._hook_progress({
-                        'downloaded_bytes': downloaded_data_len,
-                        'total_bytes': data_len,
-                        'tmpfilename': tmpfilename,
-                        'filename': filename,
-                        'status': 'downloading',
-                        'eta': eta,
-                        'speed': speed,
-                    })
-                else:
-                    # no percent for live streams
-                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
-                    if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
-                        time_now = time.time()
-                        speed = self.calc_speed(start, time_now, downloaded_data_len)
-                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
-                        cursor_in_new_line = False
-                        self._hook_progress({
-                            'downloaded_bytes': downloaded_data_len,
-                            'tmpfilename': tmpfilename,
-                            'filename': filename,
-                            'status': 'downloading',
-                            'speed': speed,
-                        })
-                    elif self.params.get('verbose', False):
-                        if not cursor_in_new_line:
-                            self.to_screen(u'')
-                        cursor_in_new_line = True
-                        self.to_screen(u'[rtmpdump] '+line)
-            proc.wait()
-            if not cursor_in_new_line:
-                self.to_screen(u'')
-            return proc.returncode
-
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
-        test = self.params.get('test', False)
-
-        # Check for rtmpdump first
-        try:
-            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-        except (OSError, IOError):
-            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
-            return False
-
-        # Download using rtmpdump. rtmpdump returns exit code 2 when
-        # the connection was interrumpted and resuming appears to be
-        # possible. This is part of rtmpdump's normal usage, AFAIK.
-        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
-        if player_url is not None:
-            basic_args += ['--swfVfy', player_url]
-        if page_url is not None:
-            basic_args += ['--pageUrl', page_url]
-        if play_path is not None:
-            basic_args += ['--playpath', play_path]
-        if tc_url is not None:
-            basic_args += ['--tcUrl', url]
-        if test:
-            basic_args += ['--stop', '1']
-        if live:
-            basic_args += ['--live']
-        if conn:
-            basic_args += ['--conn', conn]
-        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
-
-        if sys.platform == 'win32' and sys.version_info < (3, 0):
-            # Windows subprocess module does not actually support Unicode
-            # on Python 2.x
-            # See http://stackoverflow.com/a/9951851/35070
-            subprocess_encoding = sys.getfilesystemencoding()
-            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
-        else:
-            subprocess_encoding = None
-
-        if self.params.get('verbose', False):
-            if subprocess_encoding:
-                str_args = [
-                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
-                    for a in args]
-            else:
-                str_args = args
-            try:
-                import pipes
-                shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
-            except ImportError:
-                shell_quote = repr
-            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))
-
-        retval = run_rtmpdump(args)
-
-        while (retval == 2 or retval == 1) and not test:
-            prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
-            time.sleep(5.0) # This seems to be needed
-            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
-            cursize = os.path.getsize(encodeFilename(tmpfilename))
-            if prevsize == cursize and retval == 1:
-                break
-             # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
-            if prevsize == cursize and retval == 2 and cursize > 1024:
-                self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
-                retval = 0
-                break
-        if retval == 0 or (test and retval == 2):
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'[rtmpdump] %s bytes' % fsize)
-            self.try_rename(tmpfilename, filename)
-            self._hook_progress({
-                'downloaded_bytes': fsize,
-                'total_bytes': fsize,
-                'filename': filename,
-                'status': 'finished',
-            })
-            return True
-        else:
-            self.to_stderr(u"\n")
-            self.report_error(u'rtmpdump exited with code %d' % retval)
-            return False
-
-    def _download_with_mplayer(self, filename, url):
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
-
-        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
-        # Check for mplayer first
-        try:
-            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-        except (OSError, IOError):
-            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0] )
-            return False
-
-        # Download using mplayer. 
-        retval = subprocess.call(args)
-        if retval == 0:
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
-            self.try_rename(tmpfilename, filename)
-            self._hook_progress({
-                'downloaded_bytes': fsize,
-                'total_bytes': fsize,
-                'filename': filename,
-                'status': 'finished',
-            })
-            return True
-        else:
-            self.to_stderr(u"\n")
-            self.report_error(u'mplayer exited with code %d' % retval)
-            return False
-
-    def _download_m3u8_with_ffmpeg(self, filename, url):
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
-
-        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
-            '-bsf:a', 'aac_adtstoasc', tmpfilename]
-
-        for program in ['avconv', 'ffmpeg']:
-            try:
-                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
-                break
-            except (OSError, IOError):
-                pass
-        else:
-            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
-        cmd = [program] + args
-
-        retval = subprocess.call(cmd)
-        if retval == 0:
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
-            self.try_rename(tmpfilename, filename)
-            self._hook_progress({
-                'downloaded_bytes': fsize,
-                'total_bytes': fsize,
-                'filename': filename,
-                'status': 'finished',
-            })
-            return True
-        else:
-            self.to_stderr(u"\n")
-            self.report_error(u'ffmpeg exited with code %d' % retval)
-            return False
+# Legacy file for backwards compatibility, use youtube_dl.downloader instead!
+from .downloader import FileDownloader as RealFileDownloader
+from .downloader import get_suitable_downloader
 
 
 
 
+# This class reproduces the old behaviour of FileDownloader
+class FileDownloader(RealFileDownloader):
     def _do_download(self, filename, info_dict):
     def _do_download(self, filename, info_dict):
-        url = info_dict['url']
-
-        # Check file already present
-        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
-            self.report_file_already_downloaded(filename)
-            self._hook_progress({
-                'filename': filename,
-                'status': 'finished',
-                'total_bytes': os.path.getsize(encodeFilename(filename)),
-            })
-            return True
-
-        # Attempt to download using rtmpdump
-        if url.startswith('rtmp'):
-            return self._download_with_rtmpdump(filename, url,
-                                                info_dict.get('player_url', None),
-                                                info_dict.get('page_url', None),
-                                                info_dict.get('play_path', None),
-                                                info_dict.get('tc_url', None),
-                                                info_dict.get('rtmp_live', False),
-                                                info_dict.get('rtmp_conn', None))
-
-        # Attempt to download using mplayer
-        if url.startswith('mms') or url.startswith('rtsp'):
-            return self._download_with_mplayer(filename, url)
-
-        # m3u8 manifest are downloaded with ffmpeg
-        if determine_ext(url) == u'm3u8':
-            return self._download_m3u8_with_ffmpeg(filename, url)
-
-        tmpfilename = self.temp_name(filename)
-        stream = None
-
-        # Do not include the Accept-Encoding header
-        headers = {'Youtubedl-no-compression': 'True'}
-        if 'user_agent' in info_dict:
-            headers['Youtubedl-user-agent'] = info_dict['user_agent']
-        basic_request = compat_urllib_request.Request(url, None, headers)
-        request = compat_urllib_request.Request(url, None, headers)
-
-        if self.params.get('test', False):
-            request.add_header('Range','bytes=0-10240')
-
-        # Establish possible resume length
-        if os.path.isfile(encodeFilename(tmpfilename)):
-            resume_len = os.path.getsize(encodeFilename(tmpfilename))
-        else:
-            resume_len = 0
-
-        open_mode = 'wb'
-        if resume_len != 0:
-            if self.params.get('continuedl', False):
-                self.report_resuming_byte(resume_len)
-                request.add_header('Range','bytes=%d-' % resume_len)
-                open_mode = 'ab'
-            else:
-                resume_len = 0
-
-        count = 0
-        retries = self.params.get('retries', 0)
-        while count <= retries:
-            # Establish connection
-            try:
-                if count == 0 and 'urlhandle' in info_dict:
-                    data = info_dict['urlhandle']
-                data = compat_urllib_request.urlopen(request)
-                break
-            except (compat_urllib_error.HTTPError, ) as err:
-                if (err.code < 500 or err.code >= 600) and err.code != 416:
-                    # Unexpected HTTP error
-                    raise
-                elif err.code == 416:
-                    # Unable to resume (requested range not satisfiable)
-                    try:
-                        # Open the connection again without the range header
-                        data = compat_urllib_request.urlopen(basic_request)
-                        content_length = data.info()['Content-Length']
-                    except (compat_urllib_error.HTTPError, ) as err:
-                        if err.code < 500 or err.code >= 600:
-                            raise
-                    else:
-                        # Examine the reported length
-                        if (content_length is not None and
-                                (resume_len - 100 < int(content_length) < resume_len + 100)):
-                            # The file had already been fully downloaded.
-                            # Explanation to the above condition: in issue #175 it was revealed that
-                            # YouTube sometimes adds or removes a few bytes from the end of the file,
-                            # changing the file size slightly and causing problems for some users. So
-                            # I decided to implement a suggested change and consider the file
-                            # completely downloaded if the file size differs less than 100 bytes from
-                            # the one in the hard drive.
-                            self.report_file_already_downloaded(filename)
-                            self.try_rename(tmpfilename, filename)
-                            self._hook_progress({
-                                'filename': filename,
-                                'status': 'finished',
-                            })
-                            return True
-                        else:
-                            # The length does not match, we start the download over
-                            self.report_unable_to_resume()
-                            open_mode = 'wb'
-                            break
-            # Retry
-            count += 1
-            if count <= retries:
-                self.report_retry(count, retries)
-
-        if count > retries:
-            self.report_error(u'giving up after %s retries' % retries)
-            return False
-
-        data_len = data.info().get('Content-length', None)
-        if data_len is not None:
-            data_len = int(data_len) + resume_len
-            min_data_len = self.params.get("min_filesize", None)
-            max_data_len =  self.params.get("max_filesize", None)
-            if min_data_len is not None and data_len < min_data_len:
-                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
-                return False
-            if max_data_len is not None and data_len > max_data_len:
-                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
-                return False
-
-        data_len_str = format_bytes(data_len)
-        byte_counter = 0 + resume_len
-        block_size = self.params.get('buffersize', 1024)
-        start = time.time()
-        while True:
-            # Download and write
-            before = time.time()
-            data_block = data.read(block_size)
-            after = time.time()
-            if len(data_block) == 0:
-                break
-            byte_counter += len(data_block)
-
-            # Open file just in time
-            if stream is None:
-                try:
-                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
-                    assert stream is not None
-                    filename = self.undo_temp_name(tmpfilename)
-                    self.report_destination(filename)
-                except (OSError, IOError) as err:
-                    self.report_error(u'unable to open for writing: %s' % str(err))
-                    return False
-            try:
-                stream.write(data_block)
-            except (IOError, OSError) as err:
-                self.to_stderr(u"\n")
-                self.report_error(u'unable to write data: %s' % str(err))
-                return False
-            if not self.params.get('noresizebuffer', False):
-                block_size = self.best_block_size(after - before, len(data_block))
-
-            # Progress message
-            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
-            if data_len is None:
-                eta = percent = None
-            else:
-                percent = self.calc_percent(byte_counter, data_len)
-                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
-            self.report_progress(percent, data_len_str, speed, eta)
-
-            self._hook_progress({
-                'downloaded_bytes': byte_counter,
-                'total_bytes': data_len,
-                'tmpfilename': tmpfilename,
-                'filename': filename,
-                'status': 'downloading',
-                'eta': eta,
-                'speed': speed,
-            })
-
-            # Apply rate limit
-            self.slow_down(start, byte_counter - resume_len)
-
-        if stream is None:
-            self.to_stderr(u"\n")
-            self.report_error(u'Did not get any data blocks')
-            return False
-        stream.close()
-        self.report_finish(data_len_str, (time.time() - start))
-        if data_len is not None and byte_counter != data_len:
-            raise ContentTooShortError(byte_counter, int(data_len))
-        self.try_rename(tmpfilename, filename)
-
-        # Update file modification time
-        if self.params.get('updatetime', True):
-            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
-
-        self._hook_progress({
-            'downloaded_bytes': byte_counter,
-            'total_bytes': byte_counter,
-            'filename': filename,
-            'status': 'finished',
-        })
-
-        return True
-
-    def _hook_progress(self, status):
+        real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
         for ph in self._progress_hooks:
         for ph in self._progress_hooks:
-            ph(status)
-
-    def add_progress_hook(self, ph):
-        """ ph gets called on download progress, with a dictionary with the entries
-        * filename: The final filename
-        * status: One of "downloading" and "finished"
-
-        It can also have some of the following entries:
-
-        * downloaded_bytes: Bytes on disks
-        * total_bytes: Total bytes, None if unknown
-        * tmpfilename: The filename we're currently writing to
-        * eta: The estimated time in seconds, None if unknown
-        * speed: The download speed in bytes/second, None if unknown
-
-        Hooks are guaranteed to be called at least once (with status "finished")
-        if the download is successful.
-        """
-        self._progress_hooks.append(ph)
+            real_fd.add_progress_hook(ph)
+        return real_fd.download(filename, info_dict)
index 2a078adfbbc7f7aed7ca31a6aff85d0e6a9c19b2..d40314ee57e8c93cd4a4ae86509a976e412baf78 100644 (file)
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals
 
 import collections
 import errno
 
 import collections
 import errno
@@ -51,9 +51,11 @@ from .utils import (
     write_json_file,
     write_string,
     YoutubeDLHandler,
     write_json_file,
     write_string,
     YoutubeDLHandler,
+    prepend_extension,
 )
 from .extractor import get_info_extractor, gen_extractors
 )
 from .extractor import get_info_extractor, gen_extractors
-from .FileDownloader import FileDownloader
+from .downloader import get_suitable_downloader
+from .postprocessor import FFmpegMergerPP
 from .version import __version__
 
 
 from .version import __version__
 
 
@@ -148,11 +150,16 @@ class YoutubeDL(object):
     socket_timeout:    Time to wait for unresponsive hosts, in seconds
     bidi_workaround:   Work around buggy terminals without bidirectional text
                        support, using fridibi
     socket_timeout:    Time to wait for unresponsive hosts, in seconds
     bidi_workaround:   Work around buggy terminals without bidirectional text
                        support, using fridibi
+    debug_printtraffic:Print out sent and received HTTP traffic
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
     noresizebuffer, retries, continuedl, noprogress, consoletitle
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
     noresizebuffer, retries, continuedl, noprogress, consoletitle
+
+    The following options are used by the post processors:
+    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
+                       otherwise prefer avconv.
     """
 
     params = None
     """
 
     params = None
@@ -164,6 +171,8 @@ class YoutubeDL(object):
 
     def __init__(self, params=None):
         """Create a FileDownloader object with the given options."""
 
     def __init__(self, params=None):
         """Create a FileDownloader object with the given options."""
+        if params is None:
+            params = {}
         self._ies = []
         self._ies_instances = {}
         self._pps = []
         self._ies = []
         self._ies_instances = {}
         self._pps = []
@@ -172,7 +181,7 @@ class YoutubeDL(object):
         self._num_downloads = 0
         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
         self._err_file = sys.stderr
         self._num_downloads = 0
         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
         self._err_file = sys.stderr
-        self.params = {} if params is None else params
+        self.params = params
 
         if params.get('bidi_workaround', False):
             try:
 
         if params.get('bidi_workaround', False):
             try:
@@ -183,15 +192,21 @@ class YoutubeDL(object):
                     width_args = []
                 else:
                     width_args = ['-w', str(width)]
                     width_args = []
                 else:
                     width_args = ['-w', str(width)]
-                self._fribidi = subprocess.Popen(
-                    ['fribidi', '-c', 'UTF-8'] + width_args,
+                sp_kwargs = dict(
                     stdin=subprocess.PIPE,
                     stdout=slave,
                     stderr=self._err_file)
                     stdin=subprocess.PIPE,
                     stdout=slave,
                     stderr=self._err_file)
-                self._fribidi_channel = os.fdopen(master, 'rb')
+                try:
+                    self._output_process = subprocess.Popen(
+                        ['bidiv'] + width_args, **sp_kwargs
+                    )
+                except OSError:
+                    self._output_process = subprocess.Popen(
+                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+                self._output_channel = os.fdopen(master, 'rb')
             except OSError as ose:
                 if ose.errno == 2:
             except OSError as ose:
                 if ose.errno == 2:
-                    self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
+                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that  fribidi  is an executable file in one of the directories in your $PATH.')
                 else:
                     raise
 
                 else:
                     raise
 
@@ -200,15 +215,13 @@ class YoutubeDL(object):
                 and not params['restrictfilenames']):
             # On Python 3, the Unicode filesystem API will throw errors (#1474)
             self.report_warning(
                 and not params['restrictfilenames']):
             # On Python 3, the Unicode filesystem API will throw errors (#1474)
             self.report_warning(
-                u'Assuming --restrict-filenames since file system encoding '
-                u'cannot encode all charactes. '
-                u'Set the LC_ALL environment variable to fix this.')
+                'Assuming --restrict-filenames since file system encoding '
+                'cannot encode all charactes. '
+                'Set the LC_ALL environment variable to fix this.')
             self.params['restrictfilenames'] = True
 
             self.params['restrictfilenames'] = True
 
-        self.fd = FileDownloader(self, self.params)
-
         if '%(stitle)s' in self.params.get('outtmpl', ''):
         if '%(stitle)s' in self.params.get('outtmpl', ''):
-            self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
+            self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
 
         self._setup_opener()
 
 
         self._setup_opener()
 
@@ -242,17 +255,22 @@ class YoutubeDL(object):
         self._pps.append(pp)
         pp.set_downloader(self)
 
         self._pps.append(pp)
         pp.set_downloader(self)
 
+    def add_progress_hook(self, ph):
+        """Add the progress hook (currently only for the file downloader)"""
+        self._progress_hooks.append(ph)
+
     def _bidi_workaround(self, message):
     def _bidi_workaround(self, message):
-        if not hasattr(self, '_fribidi_channel'):
+        if not hasattr(self, '_output_channel'):
             return message
 
             return message
 
-        assert type(message) == type(u'')
-        line_count = message.count(u'\n') + 1
-        self._fribidi.stdin.write((message + u'\n').encode('utf-8'))
-        self._fribidi.stdin.flush()
-        res = u''.join(self._fribidi_channel.readline().decode('utf-8')
+        assert hasattr(self, '_output_process')
+        assert type(message) == type('')
+        line_count = message.count('\n') + 1
+        self._output_process.stdin.write((message + '\n').encode('utf-8'))
+        self._output_process.stdin.flush()
+        res = ''.join(self._output_channel.readline().decode('utf-8')
                        for _ in range(line_count))
                        for _ in range(line_count))
-        return res[:-len(u'\n')]
+        return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
         """Print message to stdout if not in quiet mode."""
 
     def to_screen(self, message, skip_eol=False):
         """Print message to stdout if not in quiet mode."""
@@ -264,19 +282,19 @@ class YoutubeDL(object):
             self.params['logger'].debug(message)
         elif not check_quiet or not self.params.get('quiet', False):
             message = self._bidi_workaround(message)
             self.params['logger'].debug(message)
         elif not check_quiet or not self.params.get('quiet', False):
             message = self._bidi_workaround(message)
-            terminator = [u'\n', u''][skip_eol]
+            terminator = ['\n', ''][skip_eol]
             output = message + terminator
 
             write_string(output, self._screen_file)
 
     def to_stderr(self, message):
         """Print message to stderr."""
             output = message + terminator
 
             write_string(output, self._screen_file)
 
     def to_stderr(self, message):
         """Print message to stderr."""
-        assert type(message) == type(u'')
+        assert type(message) == type('')
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
             message = self._bidi_workaround(message)
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
             message = self._bidi_workaround(message)
-            output = message + u'\n'
+            output = message + '\n'
             write_string(output, self._err_file)
 
     def to_console_title(self, message):
             write_string(output, self._err_file)
 
     def to_console_title(self, message):
@@ -287,21 +305,21 @@ class YoutubeDL(object):
             # already of type unicode()
             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
         elif 'TERM' in os.environ:
             # already of type unicode()
             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
         elif 'TERM' in os.environ:
-            write_string(u'\033]0;%s\007' % message, self._screen_file)
+            write_string('\033]0;%s\007' % message, self._screen_file)
 
     def save_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Save the title on stack
 
     def save_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Save the title on stack
-            write_string(u'\033[22;0t', self._screen_file)
+            write_string('\033[22;0t', self._screen_file)
 
     def restore_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Restore the title from stack
 
     def restore_console_title(self):
         if not self.params.get('consoletitle', False):
             return
         if 'TERM' in os.environ:
             # Restore the title from stack
-            write_string(u'\033[23;0t', self._screen_file)
+            write_string('\033[23;0t', self._screen_file)
 
     def __enter__(self):
         self.save_console_title()
 
     def __enter__(self):
         self.save_console_title()
@@ -327,13 +345,13 @@ class YoutubeDL(object):
         if self.params.get('verbose'):
             if tb is None:
                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
         if self.params.get('verbose'):
             if tb is None:
                 if sys.exc_info()[0]:  # if .trouble has been called from an except block
-                    tb = u''
+                    tb = ''
                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                     if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
-                        tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
+                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                     tb += compat_str(traceback.format_exc())
                 else:
                     tb_data = traceback.format_list(traceback.extract_stack())
                     tb += compat_str(traceback.format_exc())
                 else:
                     tb_data = traceback.format_list(traceback.extract_stack())
-                    tb = u''.join(tb_data)
+                    tb = ''.join(tb_data)
             self.to_stderr(tb)
         if not self.params.get('ignoreerrors', False):
             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
             self.to_stderr(tb)
         if not self.params.get('ignoreerrors', False):
             if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
@@ -349,10 +367,10 @@ class YoutubeDL(object):
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if self._err_file.isatty() and os.name != 'nt':
         If stderr is a tty file the 'WARNING:' will be colored
         '''
         if self._err_file.isatty() and os.name != 'nt':
-            _msg_header = u'\033[0;33mWARNING:\033[0m'
+            _msg_header = '\033[0;33mWARNING:\033[0m'
         else:
         else:
-            _msg_header = u'WARNING:'
-        warning_message = u'%s %s' % (_msg_header, message)
+            _msg_header = 'WARNING:'
+        warning_message = '%s %s' % (_msg_header, message)
         self.to_stderr(warning_message)
 
     def report_error(self, message, tb=None):
         self.to_stderr(warning_message)
 
     def report_error(self, message, tb=None):
@@ -361,18 +379,18 @@ class YoutubeDL(object):
         in red if stderr is a tty file.
         '''
         if self._err_file.isatty() and os.name != 'nt':
         in red if stderr is a tty file.
         '''
         if self._err_file.isatty() and os.name != 'nt':
-            _msg_header = u'\033[0;31mERROR:\033[0m'
+            _msg_header = '\033[0;31mERROR:\033[0m'
         else:
         else:
-            _msg_header = u'ERROR:'
-        error_message = u'%s %s' % (_msg_header, message)
+            _msg_header = 'ERROR:'
+        error_message = '%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
         self.trouble(error_message, tb)
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')
 
     def increment_downloads(self):
         """Increment the ordinal that assigns a number to each file."""
 
     def increment_downloads(self):
         """Increment the ordinal that assigns a number to each file."""
@@ -387,61 +405,61 @@ class YoutubeDL(object):
             autonumber_size = self.params.get('autonumber_size')
             if autonumber_size is None:
                 autonumber_size = 5
             autonumber_size = self.params.get('autonumber_size')
             if autonumber_size is None:
                 autonumber_size = 5
-            autonumber_templ = u'%0' + str(autonumber_size) + u'd'
+            autonumber_templ = '%0' + str(autonumber_size) + 'd'
             template_dict['autonumber'] = autonumber_templ % self._num_downloads
             if template_dict.get('playlist_index') is not None:
             template_dict['autonumber'] = autonumber_templ % self._num_downloads
             if template_dict.get('playlist_index') is not None:
-                template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
+                template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
 
             sanitize = lambda k, v: sanitize_filename(
                 compat_str(v),
                 restricted=self.params.get('restrictfilenames'),
 
             sanitize = lambda k, v: sanitize_filename(
                 compat_str(v),
                 restricted=self.params.get('restrictfilenames'),
-                is_id=(k == u'id'))
+                is_id=(k == 'id'))
             template_dict = dict((k, sanitize(k, v))
                                  for k, v in template_dict.items()
                                  if v is not None)
             template_dict = dict((k, sanitize(k, v))
                                  for k, v in template_dict.items()
                                  if v is not None)
-            template_dict = collections.defaultdict(lambda: u'NA', template_dict)
+            template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
             tmpl = os.path.expanduser(self.params['outtmpl'])
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
 
             tmpl = os.path.expanduser(self.params['outtmpl'])
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
-            self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + repr(preferredencoding()) + ')')
+            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None
 
     def _match_entry(self, info_dict):
         """ Returns None iff the file should be downloaded """
 
             return None
 
     def _match_entry(self, info_dict):
         """ Returns None iff the file should be downloaded """
 
-        video_title = info_dict.get('title', info_dict.get('id', u'video'))
+        video_title = info_dict.get('title', info_dict.get('id', 'video'))
         if 'title' in info_dict:
             # This can happen when we're just evaluating the playlist
             title = info_dict['title']
             matchtitle = self.params.get('matchtitle', False)
             if matchtitle:
                 if not re.search(matchtitle, title, re.IGNORECASE):
         if 'title' in info_dict:
             # This can happen when we're just evaluating the playlist
             title = info_dict['title']
             matchtitle = self.params.get('matchtitle', False)
             if matchtitle:
                 if not re.search(matchtitle, title, re.IGNORECASE):
-                    return u'"' + title + '" title did not match pattern "' + matchtitle + '"'
+                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
             rejecttitle = self.params.get('rejecttitle', False)
             if rejecttitle:
                 if re.search(rejecttitle, title, re.IGNORECASE):
             rejecttitle = self.params.get('rejecttitle', False)
             if rejecttitle:
                 if re.search(rejecttitle, title, re.IGNORECASE):
-                    return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
         date = info_dict.get('upload_date', None)
         if date is not None:
             dateRange = self.params.get('daterange', DateRange())
             if date not in dateRange:
         date = info_dict.get('upload_date', None)
         if date is not None:
             dateRange = self.params.get('daterange', DateRange())
             if date not in dateRange:
-                return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
         view_count = info_dict.get('view_count', None)
         if view_count is not None:
             min_views = self.params.get('min_views')
             if min_views is not None and view_count < min_views:
         view_count = info_dict.get('view_count', None)
         if view_count is not None:
             min_views = self.params.get('min_views')
             if min_views is not None and view_count < min_views:
-                return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
+                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
             max_views = self.params.get('max_views')
             if max_views is not None and view_count > max_views:
             max_views = self.params.get('max_views')
             if max_views is not None and view_count > max_views:
-                return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
+                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
         age_limit = self.params.get('age_limit')
         if age_limit is not None:
             if age_limit < info_dict.get('age_limit', 0):
         age_limit = self.params.get('age_limit')
         if age_limit is not None:
             if age_limit < info_dict.get('age_limit', 0):
-                return u'Skipping "' + title + '" because it is age restricted'
+                return 'Skipping "' + title + '" because it is age restricted'
         if self.in_download_archive(info_dict):
         if self.in_download_archive(info_dict):
-            return u'%s has already been recorded in archive' % video_title
+            return '%s has already been recorded in archive' % video_title
         return None
 
     @staticmethod
         return None
 
     @staticmethod
@@ -468,8 +486,8 @@ class YoutubeDL(object):
                 continue
 
             if not ie.working():
                 continue
 
             if not ie.working():
-                self.report_warning(u'The program functionality for this site has been marked as broken, '
-                                    u'and will probably not work.')
+                self.report_warning('The program functionality for this site has been marked as broken, '
+                                    'and will probably not work.')
 
             try:
                 ie_result = ie.extract(url)
 
             try:
                 ie_result = ie.extract(url)
@@ -502,7 +520,7 @@ class YoutubeDL(object):
                 else:
                     raise
         else:
                 else:
                     raise
         else:
-            self.report_error(u'no suitable InfoExtractor: %s' % url)
+            self.report_error('no suitable InfoExtractor: %s' % url)
 
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
 
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
@@ -533,7 +551,7 @@ class YoutubeDL(object):
             def make_result(embedded_info):
                 new_result = ie_result.copy()
                 for f in ('_type', 'url', 'ext', 'player_url', 'formats',
             def make_result(embedded_info):
                 new_result = ie_result.copy()
                 for f in ('_type', 'url', 'ext', 'player_url', 'formats',
-                          'entries', 'urlhandle', 'ie_key', 'duration',
+                          'entries', 'ie_key', 'duration',
                           'subtitles', 'annotations', 'format',
                           'thumbnail', 'thumbnails'):
                     if f in new_result:
                           'subtitles', 'annotations', 'format',
                           'thumbnail', 'thumbnails'):
                     if f in new_result:
@@ -553,7 +571,7 @@ class YoutubeDL(object):
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
         elif result_type == 'playlist':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
-            self.to_screen(u'[download] Downloading playlist: %s' % playlist)
+            self.to_screen('[download] Downloading playlist: %s' % playlist)
 
             playlist_results = []
 
 
             playlist_results = []
 
@@ -568,11 +586,11 @@ class YoutubeDL(object):
             n_entries = len(entries)
 
             self.to_screen(
             n_entries = len(entries)
 
             self.to_screen(
-                u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
+                "[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
             for i, entry in enumerate(entries, 1):
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
             for i, entry in enumerate(entries, 1):
-                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
+                self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
                 extra = {
                     'playlist': playlist,
                     'playlist_index': i + playliststart,
                 extra = {
                     'playlist': playlist,
                     'playlist_index': i + playliststart,
@@ -584,7 +602,7 @@ class YoutubeDL(object):
 
                 reason = self._match_entry(entry)
                 if reason is not None:
 
                 reason = self._match_entry(entry)
                 if reason is not None:
-                    self.to_screen(u'[download] ' + reason)
+                    self.to_screen('[download] ' + reason)
                     continue
 
                 entry_result = self.process_ie_result(entry,
                     continue
 
                 entry_result = self.process_ie_result(entry,
@@ -617,7 +635,7 @@ class YoutubeDL(object):
         elif format_spec == 'worst':
             return available_formats[0]
         else:
         elif format_spec == 'worst':
             return available_formats[0]
         else:
-            extensions = [u'mp4', u'flv', u'webm', u'3gp']
+            extensions = ['mp4', 'flv', 'webm', '3gp']
             if format_spec in extensions:
                 filter_f = lambda f: f['ext'] == format_spec
             else:
             if format_spec in extensions:
                 filter_f = lambda f: f['ext'] == format_spec
             else:
@@ -636,7 +654,7 @@ class YoutubeDL(object):
             info_dict['playlist_index'] = None
 
         # This extractors handle format selection themselves
             info_dict['playlist_index'] = None
 
         # This extractors handle format selection themselves
-        if info_dict['extractor'] in [u'youtube', u'Youku']:
+        if info_dict['extractor'] in ['Youku']:
             if download:
                 self.process_info(info_dict)
             return info_dict
             if download:
                 self.process_info(info_dict)
             return info_dict
@@ -653,33 +671,32 @@ class YoutubeDL(object):
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
             if format.get('format') is None:
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
             if format.get('format') is None:
-                format['format'] = u'{id} - {res}{note}'.format(
+                format['format'] = '{id} - {res}{note}'.format(
                     id=format['format_id'],
                     res=self.format_resolution(format),
                     id=format['format_id'],
                     res=self.format_resolution(format),
-                    note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
+                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                 )
             # Automatically determine file extension if missing
             if 'ext' not in format:
                 format['ext'] = determine_ext(format['url'])
 
                 )
             # Automatically determine file extension if missing
             if 'ext' not in format:
                 format['ext'] = determine_ext(format['url'])
 
-        if self.params.get('listformats', None):
-            self.list_formats(info_dict)
-            return
-
         format_limit = self.params.get('format_limit', None)
         if format_limit:
             formats = list(takewhile_inclusive(
                 lambda f: f['format_id'] != format_limit, formats
             ))
         format_limit = self.params.get('format_limit', None)
         if format_limit:
             formats = list(takewhile_inclusive(
                 lambda f: f['format_id'] != format_limit, formats
             ))
-        if self.params.get('prefer_free_formats'):
-            def _free_formats_key(f):
-                try:
-                    ext_ord = [u'flv', u'mp4', u'webm'].index(f['ext'])
-                except ValueError:
-                    ext_ord = -1
-                # We only compare the extension if they have the same height and width
-                return (f.get('height'), f.get('width'), ext_ord)
-            formats = sorted(formats, key=_free_formats_key)
+
+        # TODO Central sorting goes here
+
+        if formats[0] is not info_dict: 
+            # only set the 'formats' fields if the original info_dict list them
+            # otherwise we end up with a circular reference, the first (and unique)
+            # element in the 'formats' field in info_dict is info_dict itself, 
+            # wich can't be exported to json
+            info_dict['formats'] = formats
+        if self.params.get('listformats', None):
+            self.list_formats(info_dict)
+            return
 
         req_format = self.params.get('format', 'best')
         if req_format is None:
 
         req_format = self.params.get('format', 'best')
         if req_format is None:
@@ -689,21 +706,35 @@ class YoutubeDL(object):
         if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
         if req_format in ('-1', 'all'):
             formats_to_download = formats
         else:
-            # We can accept formats requestd in the format: 34/5/best, we pick
+            # We can accept formats requested in the format: 34/5/best, we pick
             # the first that is available, starting from left
             req_formats = req_format.split('/')
             for rf in req_formats:
             # the first that is available, starting from left
             req_formats = req_format.split('/')
             for rf in req_formats:
-                selected_format = self.select_format(rf, formats)
+                if re.match(r'.+?\+.+?', rf) is not None:
+                    # Two formats have been requested like '137+139'
+                    format_1, format_2 = rf.split('+')
+                    formats_info = (self.select_format(format_1, formats),
+                        self.select_format(format_2, formats))
+                    if all(formats_info):
+                        selected_format = {
+                            'requested_formats': formats_info,
+                            'format': rf,
+                            'ext': formats_info[0]['ext'],
+                        }
+                    else:
+                        selected_format = None
+                else:
+                    selected_format = self.select_format(rf, formats)
                 if selected_format is not None:
                     formats_to_download = [selected_format]
                     break
         if not formats_to_download:
                 if selected_format is not None:
                     formats_to_download = [selected_format]
                     break
         if not formats_to_download:
-            raise ExtractorError(u'requested format not available',
+            raise ExtractorError('requested format not available',
                                  expected=True)
 
         if download:
             if len(formats_to_download) > 1:
                                  expected=True)
 
         if download:
             if len(formats_to_download) > 1:
-                self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
+                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
             for format in formats_to_download:
                 new_info = dict(info_dict)
                 new_info.update(format)
             for format in formats_to_download:
                 new_info = dict(info_dict)
                 new_info.update(format)
@@ -721,7 +752,7 @@ class YoutubeDL(object):
 
         info_dict['fulltitle'] = info_dict['title']
         if len(info_dict['title']) > 200:
 
         info_dict['fulltitle'] = info_dict['title']
         if len(info_dict['title']) > 200:
-            info_dict['title'] = info_dict['title'][:197] + u'...'
+            info_dict['title'] = info_dict['title'][:197] + '...'
 
         # Keep for backwards compatibility
         info_dict['stitle'] = info_dict['title']
 
         # Keep for backwards compatibility
         info_dict['stitle'] = info_dict['title']
@@ -731,7 +762,7 @@ class YoutubeDL(object):
 
         reason = self._match_entry(info_dict)
         if reason is not None:
 
         reason = self._match_entry(info_dict)
         if reason is not None:
-            self.to_screen(u'[download] ' + reason)
+            self.to_screen('[download] ' + reason)
             return
 
         max_downloads = self.params.get('max_downloads')
             return
 
         max_downloads = self.params.get('max_downloads')
@@ -748,7 +779,7 @@ class YoutubeDL(object):
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
             # For RTMP URLs, also include the playpath
             self.to_stdout(info_dict['id'])
         if self.params.get('forceurl', False):
             # For RTMP URLs, also include the playpath
-            self.to_stdout(info_dict['url'] + info_dict.get('play_path', u''))
+            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
         if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
             self.to_stdout(info_dict['thumbnail'])
         if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
@@ -775,37 +806,37 @@ class YoutubeDL(object):
             if dn != '' and not os.path.exists(dn):
                 os.makedirs(dn)
         except (OSError, IOError) as err:
             if dn != '' and not os.path.exists(dn):
                 os.makedirs(dn)
         except (OSError, IOError) as err:
-            self.report_error(u'unable to create directory ' + compat_str(err))
+            self.report_error('unable to create directory ' + compat_str(err))
             return
 
         if self.params.get('writedescription', False):
             return
 
         if self.params.get('writedescription', False):
-            descfn = filename + u'.description'
+            descfn = filename + '.description'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
-                self.to_screen(u'[info] Video description is already present')
+                self.to_screen('[info] Video description is already present')
             else:
                 try:
             else:
                 try:
-                    self.to_screen(u'[info] Writing video description to: ' + descfn)
+                    self.to_screen('[info] Writing video description to: ' + descfn)
                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                         descfile.write(info_dict['description'])
                 except (KeyError, TypeError):
                     with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                         descfile.write(info_dict['description'])
                 except (KeyError, TypeError):
-                    self.report_warning(u'There\'s no description to write.')
+                    self.report_warning('There\'s no description to write.')
                 except (OSError, IOError):
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write description file ' + descfn)
+                    self.report_error('Cannot write description file ' + descfn)
                     return
 
         if self.params.get('writeannotations', False):
                     return
 
         if self.params.get('writeannotations', False):
-            annofn = filename + u'.annotations.xml'
+            annofn = filename + '.annotations.xml'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
-                self.to_screen(u'[info] Video annotations are already present')
+                self.to_screen('[info] Video annotations are already present')
             else:
                 try:
             else:
                 try:
-                    self.to_screen(u'[info] Writing video annotations to: ' + annofn)
+                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                     with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                         annofile.write(info_dict['annotations'])
                 except (KeyError, TypeError):
                     with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                         annofile.write(info_dict['annotations'])
                 except (KeyError, TypeError):
-                    self.report_warning(u'There are no annotations to write.')
+                    self.report_warning('There are no annotations to write.')
                 except (OSError, IOError):
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write annotations file: ' + annofn)
+                    self.report_error('Cannot write annotations file: ' + annofn)
                     return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                     return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
@@ -823,46 +854,45 @@ class YoutubeDL(object):
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
-                        self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
+                        self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                     else:
                     else:
-                        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
+                        self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                                 subfile.write(sub)
                 except (OSError, IOError):
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                                 subfile.write(sub)
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write subtitles file ' + descfn)
+                    self.report_error('Cannot write subtitles file ' + descfn)
                     return
 
         if self.params.get('writeinfojson', False):
                     return
 
         if self.params.get('writeinfojson', False):
-            infofn = os.path.splitext(filename)[0] + u'.info.json'
+            infofn = os.path.splitext(filename)[0] + '.info.json'
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
-                self.to_screen(u'[info] Video description metadata is already present')
+                self.to_screen('[info] Video description metadata is already present')
             else:
             else:
-                self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
+                self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
                 try:
-                    json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
-                    write_json_file(json_info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, encodeFilename(infofn))
                 except (OSError, IOError):
                 except (OSError, IOError):
-                    self.report_error(u'Cannot write metadata to JSON file ' + infofn)
+                    self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return
 
         if self.params.get('writethumbnail', False):
             if info_dict.get('thumbnail') is not None:
                     return
 
         if self.params.get('writethumbnail', False):
             if info_dict.get('thumbnail') is not None:
-                thumb_format = determine_ext(info_dict['thumbnail'], u'jpg')
-                thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format
+                thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
+                thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
                 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
-                    self.to_screen(u'[%s] %s: Thumbnail is already present' %
+                    self.to_screen('[%s] %s: Thumbnail is already present' %
                                    (info_dict['extractor'], info_dict['id']))
                 else:
                                    (info_dict['extractor'], info_dict['id']))
                 else:
-                    self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
+                    self.to_screen('[%s] %s: Downloading thumbnail ...' %
                                    (info_dict['extractor'], info_dict['id']))
                     try:
                         uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
                                    (info_dict['extractor'], info_dict['id']))
                     try:
                         uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                         with open(thumb_filename, 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
-                        self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
+                        self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                             (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                             (info_dict['extractor'], info_dict['id'], thumb_filename))
                     except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                        self.report_warning(u'Unable to download thumbnail "%s": %s' %
+                        self.report_warning('Unable to download thumbnail "%s": %s' %
                             (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
                             (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
@@ -870,21 +900,41 @@ class YoutubeDL(object):
                 success = True
             else:
                 try:
                 success = True
             else:
                 try:
-                    success = self.fd._do_download(filename, info_dict)
+                    def dl(name, info):
+                        fd = get_suitable_downloader(info)(self, self.params)
+                        for ph in self._progress_hooks:
+                            fd.add_progress_hook(ph)
+                        return fd.download(name, info)
+                    if info_dict.get('requested_formats') is not None:
+                        downloaded = []
+                        success = True
+                        for f in info_dict['requested_formats']:
+                            new_info = dict(info_dict)
+                            new_info.update(f)
+                            fname = self.prepare_filename(new_info)
+                            fname = prepend_extension(fname, 'f%s' % f['format_id'])
+                            downloaded.append(fname)
+                            partial_success = dl(fname, new_info)
+                            success = success and partial_success
+                        info_dict['__postprocessors'] = [FFmpegMergerPP(self)]
+                        info_dict['__files_to_merge'] = downloaded
+                    else:
+                        # Just a single file
+                        success = dl(filename, info_dict)
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    self.report_error(u'unable to download video data: %s' % str(err))
+                    self.report_error('unable to download video data: %s' % str(err))
                     return
                 except (OSError, IOError) as err:
                     raise UnavailableVideoError(err)
                 except (ContentTooShortError, ) as err:
                     return
                 except (OSError, IOError) as err:
                     raise UnavailableVideoError(err)
                 except (ContentTooShortError, ) as err:
-                    self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+                    self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                     return
 
             if success:
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
                     return
 
             if success:
                 try:
                     self.post_process(filename, info_dict)
                 except (PostProcessingError) as err:
-                    self.report_error(u'postprocessing: %s' % str(err))
+                    self.report_error('postprocessing: %s' % str(err))
                     return
 
         self.record_download_archive(info_dict)
                     return
 
         self.record_download_archive(info_dict)
@@ -901,9 +951,9 @@ class YoutubeDL(object):
                 #It also downloads the videos
                 self.extract_info(url)
             except UnavailableVideoError:
                 #It also downloads the videos
                 self.extract_info(url)
             except UnavailableVideoError:
-                self.report_error(u'unable to download video')
+                self.report_error('unable to download video')
             except MaxDownloadsReached:
             except MaxDownloadsReached:
-                self.to_screen(u'[info] Maximum number of downloaded files reached.')
+                self.to_screen('[info] Maximum number of downloaded files reached.')
                 raise
 
         return self._download_retcode
                 raise
 
         return self._download_retcode
@@ -916,7 +966,7 @@ class YoutubeDL(object):
         except DownloadError:
             webpage_url = info.get('webpage_url')
             if webpage_url is not None:
         except DownloadError:
             webpage_url = info.get('webpage_url')
             if webpage_url is not None:
-                self.report_warning(u'The info failed to download, trying with "%s"' % webpage_url)
+                self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
                 return self.download([webpage_url])
             else:
                 raise
                 return self.download([webpage_url])
             else:
                 raise
@@ -927,7 +977,11 @@ class YoutubeDL(object):
         info = dict(ie_info)
         info['filepath'] = filename
         keep_video = None
         info = dict(ie_info)
         info['filepath'] = filename
         keep_video = None
-        for pp in self._pps:
+        pps_chain = []
+        if ie_info.get('__postprocessors') is not None:
+            pps_chain.extend(ie_info['__postprocessors'])
+        pps_chain.extend(self._pps)
+        for pp in pps_chain:
             try:
                 keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
             try:
                 keep_video_wish, new_info = pp.run(info)
                 if keep_video_wish is not None:
@@ -940,10 +994,10 @@ class YoutubeDL(object):
                 self.report_error(e.msg)
         if keep_video is False and not self.params.get('keepvideo', False):
             try:
                 self.report_error(e.msg)
         if keep_video is False and not self.params.get('keepvideo', False):
             try:
-                self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
+                self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
                 os.remove(encodeFilename(filename))
             except (IOError, OSError):
                 os.remove(encodeFilename(filename))
             except (IOError, OSError):
-                self.report_warning(u'Unable to remove downloaded video file')
+                self.report_warning('Unable to remove downloaded video file')
 
     def _make_archive_id(self, info_dict):
         # Future-proof against any change in case
 
     def _make_archive_id(self, info_dict):
         # Future-proof against any change in case
@@ -954,7 +1008,7 @@ class YoutubeDL(object):
                 extractor = info_dict.get('ie_key')  # key in a playlist
         if extractor is None:
             return None  # Incomplete video information
                 extractor = info_dict.get('ie_key')  # key in a playlist
         if extractor is None:
             return None  # Incomplete video information
-        return extractor.lower() + u' ' + info_dict['id']
+        return extractor.lower() + ' ' + info_dict['id']
 
     def in_download_archive(self, info_dict):
         fn = self.params.get('download_archive')
 
     def in_download_archive(self, info_dict):
         fn = self.params.get('download_archive')
@@ -982,53 +1036,61 @@ class YoutubeDL(object):
         vid_id = self._make_archive_id(info_dict)
         assert vid_id
         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
         vid_id = self._make_archive_id(info_dict)
         assert vid_id
         with locked_file(fn, 'a', encoding='utf-8') as archive_file:
-            archive_file.write(vid_id + u'\n')
+            archive_file.write(vid_id + '\n')
 
     @staticmethod
     def format_resolution(format, default='unknown'):
         if format.get('vcodec') == 'none':
             return 'audio only'
 
     @staticmethod
     def format_resolution(format, default='unknown'):
         if format.get('vcodec') == 'none':
             return 'audio only'
-        if format.get('_resolution') is not None:
-            return format['_resolution']
+        if format.get('resolution') is not None:
+            return format['resolution']
         if format.get('height') is not None:
             if format.get('width') is not None:
         if format.get('height') is not None:
             if format.get('width') is not None:
-                res = u'%sx%s' % (format['width'], format['height'])
+                res = '%sx%s' % (format['width'], format['height'])
             else:
             else:
-                res = u'%sp' % format['height']
+                res = '%sp' % format['height']
+        elif format.get('width') is not None:
+            res = '?x%d' % format['width']
         else:
             res = default
         return res
 
     def list_formats(self, info_dict):
         def format_note(fdict):
         else:
             res = default
         return res
 
     def list_formats(self, info_dict):
         def format_note(fdict):
-            res = u''
+            res = ''
+            if fdict.get('ext') in ['f4f', 'f4m']:
+                res += '(unsupported) '
             if fdict.get('format_note') is not None:
             if fdict.get('format_note') is not None:
-                res += fdict['format_note'] + u' '
+                res += fdict['format_note'] + ' '
+            if fdict.get('tbr') is not None:
+                res += '%4dk ' % fdict['tbr']
             if (fdict.get('vcodec') is not None and
                     fdict.get('vcodec') != 'none'):
             if (fdict.get('vcodec') is not None and
                     fdict.get('vcodec') != 'none'):
-                res += u'%-5s' % fdict['vcodec']
-            elif fdict.get('vbr') is not None:
-                res += u'video'
+                res += '%-5s' % fdict['vcodec']
+                if fdict.get('vbr') is not None:
+                    res += '@'
+            elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
+                res += 'video@'
             if fdict.get('vbr') is not None:
             if fdict.get('vbr') is not None:
-                res += u'@%4dk' % fdict['vbr']
+                res += '%4dk' % fdict['vbr']
             if fdict.get('acodec') is not None:
                 if res:
             if fdict.get('acodec') is not None:
                 if res:
-                    res += u', '
-                res += u'%-5s' % fdict['acodec']
+                    res += ', '
+                res += '%-5s' % fdict['acodec']
             elif fdict.get('abr') is not None:
                 if res:
             elif fdict.get('abr') is not None:
                 if res:
-                    res += u', '
+                    res += ', '
                 res += 'audio'
             if fdict.get('abr') is not None:
                 res += 'audio'
             if fdict.get('abr') is not None:
-                res += u'@%3dk' % fdict['abr']
+                res += '@%3dk' % fdict['abr']
             if fdict.get('filesize') is not None:
                 if res:
             if fdict.get('filesize') is not None:
                 if res:
-                    res += u', '
+                    res += ', '
                 res += format_bytes(fdict['filesize'])
             return res
 
         def line(format, idlen=20):
                 res += format_bytes(fdict['filesize'])
             return res
 
         def line(format, idlen=20):
-            return ((u'%-' + compat_str(idlen + 1) + u's%-10s%-12s%s') % (
+            return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
                 format['format_id'],
                 format['ext'],
                 self.format_resolution(format),
                 format['format_id'],
                 format['ext'],
                 self.format_resolution(format),
@@ -1036,7 +1098,7 @@ class YoutubeDL(object):
             ))
 
         formats = info_dict.get('formats', [info_dict])
             ))
 
         formats = info_dict.get('formats', [info_dict])
-        idlen = max(len(u'format code'),
+        idlen = max(len('format code'),
                     max(len(f['format_id']) for f in formats))
         formats_s = [line(f, idlen) for f in formats]
         if len(formats) > 1:
                     max(len(f['format_id']) for f in formats))
         formats_s = [line(f, idlen) for f in formats]
         if len(formats) > 1:
@@ -1044,10 +1106,10 @@ class YoutubeDL(object):
             formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'
 
         header_line = line({
             formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'
 
         header_line = line({
-            'format_id': u'format code', 'ext': u'extension',
-            '_resolution': u'resolution', 'format_note': u'note'}, idlen=idlen)
-        self.to_screen(u'[info] Available formats for %s:\n%s\n%s' %
-                       (info_dict['id'], header_line, u"\n".join(formats_s)))
+            'format_id': 'format code', 'ext': 'extension',
+            'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
+        self.to_screen('[info] Available formats for %s:\n%s\n%s' %
+                       (info_dict['id'], header_line, '\n'.join(formats_s)))
 
     def urlopen(self, req):
         """ Start an HTTP download """
 
     def urlopen(self, req):
         """ Start an HTTP download """
@@ -1056,7 +1118,7 @@ class YoutubeDL(object):
     def print_debug_header(self):
         if not self.params.get('verbose'):
             return
     def print_debug_header(self):
         if not self.params.get('verbose'):
             return
-        write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')
+        write_string('[debug] youtube-dl version ' + __version__ + '\n')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
@@ -1065,20 +1127,20 @@ class YoutubeDL(object):
             out, err = sp.communicate()
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
             out, err = sp.communicate()
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
-                write_string(u'[debug] Git HEAD: ' + out + u'\n')
+                write_string('[debug] Git HEAD: ' + out + '\n')
         except:
             try:
                 sys.exc_clear()
             except:
                 pass
         except:
             try:
                 sys.exc_clear()
             except:
                 pass
-        write_string(u'[debug] Python version %s - %s' %
-                     (platform.python_version(), platform_name()) + u'\n')
+        write_string('[debug] Python version %s - %s' %
+                     (platform.python_version(), platform_name()) + '\n')
 
         proxy_map = {}
         for handler in self._opener.handlers:
             if hasattr(handler, 'proxies'):
                 proxy_map.update(handler.proxies)
 
         proxy_map = {}
         for handler in self._opener.handlers:
             if hasattr(handler, 'proxies'):
                 proxy_map.update(handler.proxies)
-        write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n')
+        write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
 
     def _setup_opener(self):
         timeout_val = self.params.get('socket_timeout')
 
     def _setup_opener(self):
         timeout_val = self.params.get('socket_timeout')
@@ -1108,10 +1170,13 @@ class YoutubeDL(object):
             if 'http' in proxies and 'https' not in proxies:
                 proxies['https'] = proxies['http']
         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
             if 'http' in proxies and 'https' not in proxies:
                 proxies['https'] = proxies['http']
         proxy_handler = compat_urllib_request.ProxyHandler(proxies)
+
+        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
         https_handler = make_HTTPS_handler(
         https_handler = make_HTTPS_handler(
-            self.params.get('nocheckcertificate', False))
+            self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
+        ydlh = YoutubeDLHandler(debuglevel=debuglevel)
         opener = compat_urllib_request.build_opener(
         opener = compat_urllib_request.build_opener(
-            https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
+            https_handler, proxy_handler, cookie_processor, ydlh)
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play
         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play
         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
index 63437301b6fb43f360856646184f7161e2d76c3b..82b1ff4f45e5ad032493093101f0c9439c4590a4 100644 (file)
@@ -38,12 +38,15 @@ __authors__  = (
     'Takuya Tsuchida',
     'Sergey M.',
     'Michael Orlitzky',
     'Takuya Tsuchida',
     'Sergey M.',
     'Michael Orlitzky',
+    'Chris Gahan',
+    'Saimadhav Heblikar',
 )
 
 __license__ = 'Public Domain'
 
 import codecs
 import getpass
 )
 
 __license__ = 'Public Domain'
 
 import codecs
 import getpass
+import locale
 import optparse
 import os
 import random
 import optparse
 import os
 import random
@@ -73,11 +76,12 @@ from .FileDownloader import (
 from .extractor import gen_extractors
 from .version import __version__
 from .YoutubeDL import YoutubeDL
 from .extractor import gen_extractors
 from .version import __version__
 from .YoutubeDL import YoutubeDL
-from .PostProcessor import (
+from .postprocessor import (
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
     FFmpegExtractAudioPP,
     FFmpegEmbedSubtitlePP,
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
     FFmpegExtractAudioPP,
     FFmpegEmbedSubtitlePP,
+    XAttrMetadataPP,
 )
 
 
 )
 
 
@@ -185,16 +189,16 @@ def parseOpts(overrideArguments=None):
     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
     general.add_option(
         '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
     general.add_option(
         '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
-        help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
+        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
     general.add_option(
         '--no-cache-dir', action='store_const', const=None, dest='cachedir',
         help='Disable filesystem caching')
     general.add_option(
         '--socket-timeout', dest='socket_timeout',
     general.add_option(
         '--no-cache-dir', action='store_const', const=None, dest='cachedir',
         help='Disable filesystem caching')
     general.add_option(
         '--socket-timeout', dest='socket_timeout',
-        type=float, default=None, help=optparse.SUPPRESS_HELP)
+        type=float, default=None, help=u'Time to wait before giving up, in seconds')
     general.add_option(
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
     general.add_option(
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires fribidi executable in PATH')
+        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
 
 
     selection.add_option(
 
 
     selection.add_option(
@@ -213,8 +217,12 @@ def parseOpts(overrideArguments=None):
     selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
     selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
-    selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
-    selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
+    selection.add_option(
+        '--datebefore', metavar='DATE', dest='datebefore', default=None,
+        help='download only videos uploaded on or before this date (i.e. inclusive)')
+    selection.add_option(
+        '--dateafter', metavar='DATE', dest='dateafter', default=None,
+        help='download only videos uploaded on or after this date (i.e. inclusive)')
     selection.add_option(
         '--min-views', metavar='COUNT', dest='min_views',
         default=None, type=int,
     selection.add_option(
         '--min-views', metavar='COUNT', dest='min_views',
         default=None, type=int,
@@ -239,7 +247,7 @@ def parseOpts(overrideArguments=None):
     authentication.add_option('-n', '--netrc',
             action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',
     authentication.add_option('-n', '--netrc',
             action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)')
+            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
 
 
     video_format.add_option('-f', '--format',
 
 
     video_format.add_option('-f', '--format',
@@ -252,7 +260,7 @@ def parseOpts(overrideArguments=None):
     video_format.add_option('--max-quality',
             action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
     video_format.add_option('--max-quality',
             action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
+            action='store_true', dest='listformats', help='list all available formats')
 
     subtitles.add_option('--write-sub', '--write-srt',
             action='store_true', dest='writesubtitles',
 
     subtitles.add_option('--write-sub', '--write-srt',
             action='store_true', dest='writesubtitles',
@@ -326,14 +334,16 @@ def parseOpts(overrideArguments=None):
             action='store_true', dest='verbose', help='print various debugging information', default=False)
     verbosity.add_option('--dump-intermediate-pages',
             action='store_true', dest='dump_intermediate_pages', default=False,
             action='store_true', dest='verbose', help='print various debugging information', default=False)
     verbosity.add_option('--dump-intermediate-pages',
             action='store_true', dest='dump_intermediate_pages', default=False,
-            help='print downloaded pages to debug problems(very verbose)')
+            help='print downloaded pages to debug problems (very verbose)')
     verbosity.add_option('--write-pages',
             action='store_true', dest='write_pages', default=False,
             help='Write downloaded intermediary pages to files in the current directory to debug problems')
     verbosity.add_option('--youtube-print-sig-code',
             action='store_true', dest='youtube_print_sig_code', default=False,
             help=optparse.SUPPRESS_HELP)
     verbosity.add_option('--write-pages',
             action='store_true', dest='write_pages', default=False,
             help='Write downloaded intermediary pages to files in the current directory to debug problems')
     verbosity.add_option('--youtube-print-sig-code',
             action='store_true', dest='youtube_print_sig_code', default=False,
             help=optparse.SUPPRESS_HELP)
-
+    verbosity.add_option('--print-traffic',
+            dest='debug_printtraffic', action='store_true', default=False,
+            help=optparse.SUPPRESS_HELP)
 
     filesystem.add_option('-t', '--title',
             action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
 
     filesystem.add_option('-t', '--title',
             action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
@@ -350,11 +360,11 @@ def parseOpts(overrideArguments=None):
                   '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                   '%(autonumber)s to get an automatically incremented number, '
                   '%(ext)s for the filename extension, '
                   '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                   '%(autonumber)s to get an automatically incremented number, '
                   '%(ext)s for the filename extension, '
-                  '%(format)s for the format description (like "22 - 1280x720" or "HD"),'
-                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),'
+                  '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
                   '%(upload_date)s for the upload date (YYYYMMDD), '
                   '%(extractor)s for the provider (youtube, metacafe, etc), '
                   '%(upload_date)s for the upload date (YYYYMMDD), '
                   '%(extractor)s for the provider (youtube, metacafe, etc), '
-                  '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
+                  '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
                   '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
                   'Use - to output to stdout. Can also be used to download to a different directory, '
                   'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
                   '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
                   'Use - to output to stdout. Can also be used to download to a different directory, '
                   'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
@@ -368,7 +378,7 @@ def parseOpts(overrideArguments=None):
             dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--load-info',
             dest='load_info_filename', metavar='FILE',
             dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--load-info',
             dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option')
+            help='json file containing the video information (created with the "--write-json" option)')
     filesystem.add_option('-w', '--no-overwrites',
             action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
     filesystem.add_option('-w', '--no-overwrites',
             action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
@@ -412,7 +422,13 @@ def parseOpts(overrideArguments=None):
     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
             help='embed subtitles in the video (only for mp4 videos)')
     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
             help='embed subtitles in the video (only for mp4 videos)')
     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
-            help='add metadata to the files')
+            help='write metadata to the video file')
+    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
+            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
+        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
+    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
+        help='Prefer ffmpeg over avconv for running the postprocessors')
 
 
     parser.add_option_group(general)
 
 
     parser.add_option_group(general)
@@ -473,6 +489,8 @@ def parseOpts(overrideArguments=None):
             write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
             write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
             write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
             write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
             write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
             write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+            write_string(u'[debug] Encodings: locale %r, fs %r, out %r, pref: %r\n' %
+                         (locale.getpreferredencoding(), sys.getfilesystemencoding(), sys.stdout.encoding, preferredencoding()))
 
     return parser, opts, args
 
 
     return parser, opts, args
 
@@ -517,6 +535,8 @@ def _real_main(argv=None):
             sys.exit(u'ERROR: batch file could not be read')
     all_urls = batchurls + args
     all_urls = [url.strip() for url in all_urls]
             sys.exit(u'ERROR: batch file could not be read')
     all_urls = batchurls + args
     all_urls = [url.strip() for url in all_urls]
+    _enc = preferredencoding()
+    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
 
     extractors = gen_extractors()
 
 
     extractors = gen_extractors()
 
@@ -546,7 +566,7 @@ def _real_main(argv=None):
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error(u'using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error(u'using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u' account username missing\n')
+        parser.error(u'account username missing\n')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
         parser.error(u'using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
         parser.error(u'using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
@@ -620,6 +640,7 @@ def _real_main(argv=None):
                      u' template'.format(outtmpl))
 
     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
                      u' template'.format(outtmpl))
 
     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
+    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
 
     ydl_opts = {
         'usenetrc': opts.usenetrc,
 
     ydl_opts = {
         'usenetrc': opts.usenetrc,
@@ -687,12 +708,14 @@ def _real_main(argv=None):
         'cachedir': opts.cachedir,
         'youtube_print_sig_code': opts.youtube_print_sig_code,
         'age_limit': opts.age_limit,
         'cachedir': opts.cachedir,
         'youtube_print_sig_code': opts.youtube_print_sig_code,
         'age_limit': opts.age_limit,
-        'download_archive': opts.download_archive,
+        'download_archive': download_archive_fn,
         'cookiefile': opts.cookiefile,
         'nocheckcertificate': opts.no_check_certificate,
         'proxy': opts.proxy,
         'socket_timeout': opts.socket_timeout,
         'bidi_workaround': opts.bidi_workaround,
         'cookiefile': opts.cookiefile,
         'nocheckcertificate': opts.no_check_certificate,
         'proxy': opts.proxy,
         'socket_timeout': opts.socket_timeout,
         'bidi_workaround': opts.bidi_workaround,
+        'debug_printtraffic': opts.debug_printtraffic,
+        'prefer_ffmpeg': opts.prefer_ffmpeg,
     }
 
     with YoutubeDL(ydl_opts) as ydl:
     }
 
     with YoutubeDL(ydl_opts) as ydl:
@@ -709,6 +732,8 @@ def _real_main(argv=None):
             ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
         if opts.embedsubtitles:
             ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
             ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
         if opts.embedsubtitles:
             ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
+        if opts.xattrs:
+            ydl.add_post_processor(XAttrMetadataPP())
 
         # Update version
         if opts.update_self:
 
         # Update version
         if opts.update_self:
diff --git a/youtube_dl/downloader/__init__.py b/youtube_dl/downloader/__init__.py
new file mode 100644 (file)
index 0000000..f19b490
--- /dev/null
@@ -0,0 +1,23 @@
+from .common import FileDownloader
+from .hls import HlsFD
+from .http import HttpFD
+from .mplayer import MplayerFD
+from .rtmp import RtmpFD
+
+from ..utils import (
+    determine_ext,
+)
+
+def get_suitable_downloader(info_dict):
+    """Get the downloader class that can handle the info dict."""
+    url = info_dict['url']
+
+    # rtmp / rtmpe / rtmps / rtmpt... URLs
+    if url.startswith('rtmp'):
+        return RtmpFD
+    # HTTP Live Streaming playlists
+    if determine_ext(url) == u'm3u8':
+        return HlsFD
+    # mms:// and rtsp:// streams are handled via mplayer
+    if url.startswith('mms') or url.startswith('rtsp'):
+        return MplayerFD
+    else:
+        # Plain HTTP(S) download is the default
+        return HttpFD
+
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
new file mode 100644 (file)
index 0000000..10143d5
--- /dev/null
@@ -0,0 +1,317 @@
+import os
+import re
+import sys
+import time
+
+from ..utils import (
+    encodeFilename,
+    timeconvert,
+    format_bytes,
+)
+
+
+class FileDownloader(object):
+    """File Downloader class.
+
+    File downloader objects are the ones responsible of downloading the
+    actual video file and writing it to disk.
+
+    File downloaders accept a lot of parameters. In order not to saturate
+    the object constructor with arguments, it receives a dictionary of
+    options instead.
+
+    Available options:
+
+    verbose:           Print additional info to stdout.
+    quiet:             Do not print messages to stdout.
+    ratelimit:         Download speed limit, in bytes/sec.
+    retries:           Number of times to retry for HTTP error 5xx
+    buffersize:        Size of download buffer in bytes.
+    noresizebuffer:    Do not automatically resize the download buffer.
+    continuedl:        Try to continue downloads if possible.
+    noprogress:        Do not print the progress bar.
+    logtostderr:       Log messages to stderr instead of stdout.
+    consoletitle:      Display progress in console window's titlebar.
+    nopart:            Do not use temporary .part files.
+    updatetime:        Use the Last-modified header to set output file timestamps.
+    test:              Download only first bytes to test the downloader.
+    min_filesize:      Skip files smaller than this size
+    max_filesize:      Skip files larger than this size
+
+    Subclasses of this one must re-define the real_download method.
+    """
+
+    # Options dictionary; set per instance in __init__.
+    params = None
+
+    def __init__(self, ydl, params):
+        """Create a FileDownloader object with the given options."""
+        self.ydl = ydl
+        self._progress_hooks = []
+        self.params = params
+
+    @staticmethod
+    def format_seconds(seconds):
+        """Format a duration in seconds as MM:SS or HH:MM:SS."""
+        (mins, secs) = divmod(seconds, 60)
+        (hours, mins) = divmod(mins, 60)
+        # Durations over 99 hours do not fit the fixed-width display.
+        if hours > 99:
+            return '--:--:--'
+        if hours == 0:
+            return '%02d:%02d' % (mins, secs)
+        else:
+            return '%02d:%02d:%02d' % (hours, mins, secs)
+
+    @staticmethod
+    def calc_percent(byte_counter, data_len):
+        """Return completion as a float percentage, or None if the total is unknown."""
+        if data_len is None:
+            return None
+        return float(byte_counter) / float(data_len) * 100.0
+
+    @staticmethod
+    def format_percent(percent):
+        """Format a percentage right-aligned to 6 chars; '---.-%' when unknown."""
+        if percent is None:
+            return '---.-%'
+        return '%6s' % ('%3.1f%%' % percent)
+
+    @staticmethod
+    def calc_eta(start, now, total, current):
+        """Estimate remaining seconds from average speed so far, or None."""
+        if total is None:
+            return None
+        dif = now - start
+        # Avoid division by (near) zero when nothing was downloaded yet.
+        if current == 0 or dif < 0.001: # One millisecond
+            return None
+        rate = float(current) / dif
+        return int((float(total) - float(current)) / rate)
+
+    @staticmethod
+    def format_eta(eta):
+        """Format an ETA for display; '--:--' when unknown."""
+        if eta is None:
+            return '--:--'
+        return FileDownloader.format_seconds(eta)
+
+    @staticmethod
+    def calc_speed(start, now, bytes):
+        """Average speed in bytes/sec, or None if nothing measurable yet."""
+        dif = now - start
+        if bytes == 0 or dif < 0.001: # One millisecond
+            return None
+        return float(bytes) / dif
+
+    @staticmethod
+    def format_speed(speed):
+        """Format a speed right-aligned to 10 chars; '---b/s' when unknown."""
+        if speed is None:
+            return '%10s' % '---b/s'
+        return '%10s' % ('%s/s' % format_bytes(speed))
+
+    @staticmethod
+    def best_block_size(elapsed_time, bytes):
+        """Choose the next read-block size from the last block's measured rate,
+        clamped between half and double the previous size (and at most 4 MB)."""
+        new_min = max(bytes / 2.0, 1.0)
+        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
+        if elapsed_time < 0.001:
+            return int(new_max)
+        rate = bytes / elapsed_time
+        if rate > new_max:
+            return int(new_max)
+        if rate < new_min:
+            return int(new_min)
+        return int(rate)
+
+    @staticmethod
+    def parse_bytes(bytestr):
+        """Parse a string indicating a byte quantity into an integer."""
+        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
+        if matchobj is None:
+            return None
+        number = float(matchobj.group(1))
+        # Suffix position in 'bkmgtpezy' is the power of 1024; an empty
+        # suffix yields index 0 (str.index('') == 0), i.e. plain bytes.
+        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
+        return int(round(number * multiplier))
+
+    # The following methods delegate output/reporting to the owning
+    # YoutubeDL instance.
+    def to_screen(self, *args, **kargs):
+        self.ydl.to_screen(*args, **kargs)
+
+    def to_stderr(self, message):
+        # NOTE(review): despite the name, this forwards to to_screen,
+        # not to an stderr-specific writer — confirm intent upstream.
+        self.ydl.to_screen(message)
+
+    def to_console_title(self, message):
+        self.ydl.to_console_title(message)
+
+    def trouble(self, *args, **kargs):
+        self.ydl.trouble(*args, **kargs)
+
+    def report_warning(self, *args, **kargs):
+        self.ydl.report_warning(*args, **kargs)
+
+    def report_error(self, *args, **kargs):
+        self.ydl.report_error(*args, **kargs)
+
+    def slow_down(self, start_time, byte_counter):
+        """Sleep if the download speed is over the rate limit."""
+        rate_limit = self.params.get('ratelimit', None)
+        if rate_limit is None or byte_counter == 0:
+            return
+        now = time.time()
+        elapsed = now - start_time
+        if elapsed <= 0.0:
+            return
+        speed = float(byte_counter) / elapsed
+        if speed > rate_limit:
+            # Sleep just long enough for the average speed since start_time
+            # to fall back to the configured limit.
+            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+
+    def temp_name(self, filename):
+        """Returns a temporary filename for the given filename."""
+        # Download straight to the target when .part files are disabled,
+        # when writing to stdout ('-'), or when the target exists but is
+        # not a regular file (e.g. a named pipe or device).
+        if self.params.get('nopart', False) or filename == u'-' or \
+                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
+            return filename
+        return filename + u'.part'
+
+    def undo_temp_name(self, filename):
+        # Strip the '.part' suffix added by temp_name, if present.
+        if filename.endswith(u'.part'):
+            return filename[:-len(u'.part')]
+        return filename
+
+    def try_rename(self, old_filename, new_filename):
+        """Move the finished temp file into place, reporting (not raising) on failure."""
+        try:
+            if old_filename == new_filename:
+                return
+            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
+        except (IOError, OSError) as err:
+            self.report_error(u'unable to rename file: %s' % str(err))
+
+    def try_utime(self, filename, last_modified_hdr):
+        """Try to set the last-modified time of the given file."""
+        if last_modified_hdr is None:
+            return
+        if not os.path.isfile(encodeFilename(filename)):
+            return
+        timestr = last_modified_hdr
+        if timestr is None:
+            return
+        filetime = timeconvert(timestr)
+        if filetime is None:
+            return filetime
+        # Ignore obviously invalid dates
+        if filetime == 0:
+            return
+        try:
+            os.utime(filename, (time.time(), filetime))
+        except:
+            # Best effort only: failing to set the timestamp is not fatal.
+            pass
+        return filetime
+
+    def report_destination(self, filename):
+        """Report destination filename."""
+        self.to_screen(u'[download] Destination: ' + filename)
+
+    def _report_progress_status(self, msg, is_last_line=False):
+        """Render one progress line, rewriting the current terminal line in
+        place unless the 'progress_with_newline' option is set."""
+        fullmsg = u'[download] ' + msg
+        if self.params.get('progress_with_newline', False):
+            self.to_screen(fullmsg)
+        else:
+            if os.name == 'nt':
+                # Windows consoles lack the ANSI erase-line sequence, so pad
+                # with spaces to overwrite any longer previous line.
+                prev_len = getattr(self, '_report_progress_prev_line_length',
+                                   0)
+                if prev_len > len(fullmsg):
+                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                self._report_progress_prev_line_length = len(fullmsg)
+                clear_line = u'\r'
+            else:
+                # '\x1b[K' erases to end of line; only emit it on a real tty.
+                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
+        self.to_console_title(u'youtube-dl ' + msg)
+
+    def report_progress(self, percent, data_len_str, speed, eta):
+        """Report download progress."""
+        if self.params.get('noprogress', False):
+            return
+        if eta is not None:
+            eta_str = self.format_eta(eta)
+        else:
+            eta_str = 'Unknown ETA'
+        if percent is not None:
+            percent_str = self.format_percent(percent)
+        else:
+            percent_str = 'Unknown %'
+        speed_str = self.format_speed(speed)
+
+        msg = (u'%s of %s at %s ETA %s' %
+               (percent_str, data_len_str, speed_str, eta_str))
+        self._report_progress_status(msg)
+
+    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
+        """Report progress for a live stream, where total size and ETA are unknown."""
+        if self.params.get('noprogress', False):
+            return
+        downloaded_str = format_bytes(downloaded_data_len)
+        speed_str = self.format_speed(speed)
+        elapsed_str = FileDownloader.format_seconds(elapsed)
+        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        self._report_progress_status(msg)
+
+    def report_finish(self, data_len_str, tot_time):
+        """Report download finished."""
+        if self.params.get('noprogress', False):
+            self.to_screen(u'[download] Download completed')
+        else:
+            self._report_progress_status(
+                (u'100%% of %s in %s' %
+                 (data_len_str, self.format_seconds(tot_time))),
+                is_last_line=True)
+
+    def report_resuming_byte(self, resume_len):
+        """Report attempt to resume at given byte."""
+        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+
+    def report_retry(self, count, retries):
+        """Report retry in case of HTTP error 5xx"""
+        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+
+    def report_file_already_downloaded(self, file_name):
+        """Report file has already been fully downloaded."""
+        try:
+            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+        except UnicodeEncodeError:
+            # The filename may not be representable in the console encoding.
+            self.to_screen(u'[download] The file has already been downloaded')
+
+    def report_unable_to_resume(self):
+        """Report it was impossible to resume download."""
+        self.to_screen(u'[download] Unable to resume')
+
+    def download(self, filename, info_dict):
+        """Download to a filename using the info from info_dict
+        Return True on success and False otherwise
+        """
+        # Check file already present
+        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
+            self.report_file_already_downloaded(filename)
+            # Still fire the 'finished' hook so callers observe completion.
+            self._hook_progress({
+                'filename': filename,
+                'status': 'finished',
+                'total_bytes': os.path.getsize(encodeFilename(filename)),
+            })
+            return True
+
+        return self.real_download(filename, info_dict)
+
+    def real_download(self, filename, info_dict):
+        """Real download process. Redefine in subclasses."""
+        # NOTE(review): 'sublcasses' typo in the upstream error message;
+        # left as-is since this mirrors the recorded patch.
+        raise NotImplementedError(u'This method must be implemented by sublcasses')
+
+    def _hook_progress(self, status):
+        # Fan the status dict out to every registered progress hook.
+        for ph in self._progress_hooks:
+            ph(status)
+
+    def add_progress_hook(self, ph):
+        """ ph gets called on download progress, with a dictionary with the entries
+        * filename: The final filename
+        * status: One of "downloading" and "finished"
+
+        It can also have some of the following entries:
+
+        * downloaded_bytes: Bytes on disks
+        * total_bytes: Total bytes, None if unknown
+        * tmpfilename: The filename we're currently writing to
+        * eta: The estimated time in seconds, None if unknown
+        * speed: The download speed in bytes/second, None if unknown
+
+        Hooks are guaranteed to be called at least once (with status "finished")
+        if the download is successful.
+        """
+        self._progress_hooks.append(ph)
+
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
new file mode 100644 (file)
index 0000000..fa98346
--- /dev/null
@@ -0,0 +1,44 @@
+import os
+import subprocess
+
+from .common import FileDownloader
+from ..utils import (
+    encodeFilename,
+)
+
+
+class HlsFD(FileDownloader):
+    """Download HTTP Live Streaming (.m3u8) URLs by delegating to avconv/ffmpeg."""
+    def real_download(self, filename, info_dict):
+        url = info_dict['url']
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        # Remux the HLS segments into an MP4 container without re-encoding
+        # ('-c copy'); aac_adtstoasc rewrites the AAC bitstream from the
+        # ADTS framing used in transport streams to the MP4 form.
+        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
+            '-bsf:a', 'aac_adtstoasc', tmpfilename]
+
+        # Probe for an available converter, preferring avconv over ffmpeg;
+        # the for/else falls through to the error when neither runs.
+        for program in ['avconv', 'ffmpeg']:
+            try:
+                subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+                break
+            except (OSError, IOError):
+                pass
+        else:
+            # NOTE(review): if report_error does not raise, execution
+            # continues and the call below uses the last probed name
+            # ('ffmpeg') anyway — confirm against YoutubeDL's error policy.
+            self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
+        cmd = [program] + args
+
+        retval = subprocess.call(cmd)
+        if retval == 0:
+            fsize = os.path.getsize(encodeFilename(tmpfilename))
+            self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
+            # Move the finished temp file to its final name.
+            self.try_rename(tmpfilename, filename)
+            self._hook_progress({
+                'downloaded_bytes': fsize,
+                'total_bytes': fsize,
+                'filename': filename,
+                'status': 'finished',
+            })
+            return True
+        else:
+            self.to_stderr(u"\n")
+            self.report_error(u'ffmpeg exited with code %d' % retval)
+            return False
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
new file mode 100644 (file)
index 0000000..8407727
--- /dev/null
@@ -0,0 +1,186 @@
+import os
+import time
+
+from .common import FileDownloader
+from ..utils import (
+    compat_urllib_request,
+    compat_urllib_error,
+    ContentTooShortError,
+
+    encodeFilename,
+    sanitize_open,
+    format_bytes,
+)
+
+
class HttpFD(FileDownloader):
    # Plain HTTP(S) downloader with resume, retry, adaptive block size and
    # progress-hook reporting.
    def real_download(self, filename, info_dict):
        """Download info_dict['url'] over HTTP into *filename*.

        Supports resuming a partial temp file (``continuedl``), bounded
        retries (``retries``), size limits (``min_filesize``/``max_filesize``)
        and a test mode that fetches only the first ~10 KB.
        Returns True on success, False on failure; raises
        ContentTooShortError when fewer bytes than advertised arrive.
        """
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None  # output file handle; opened lazily on the first data block

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        # basic_request deliberately carries no Range header: it is reused
        # below to probe the full length when a ranged request fails with 416.
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        if self.params.get('test', False):
            request.add_header('Range','bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range','bytes=%d-' % resume_len)
                open_mode = 'ab'  # append to the existing partial file
            else:
                # Resuming disabled: restart from scratch.
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            # Total expected size includes the bytes already on disk.
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len =  self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break  # end of stream
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    # sanitize_open may alter the name (e.g. invalid characters).
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                # Adapt the read size to the measured throughput.
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            # No data block ever arrived, so the file was never opened.
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish(data_len_str, (time.time() - start))
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True
diff --git a/youtube_dl/downloader/mplayer.py b/youtube_dl/downloader/mplayer.py
new file mode 100644 (file)
index 0000000..67e0e41
--- /dev/null
@@ -0,0 +1,40 @@
+import os
+import subprocess
+
+from .common import FileDownloader
+from ..utils import (
+    encodeFilename,
+)
+
+
class MplayerFD(FileDownloader):
    """Download MMS/RTSP streams by dumping them with the external mplayer."""

    def real_download(self, filename, info_dict):
        """Dump the stream at info_dict['url'] into *filename* via mplayer.

        Returns True on success, False when mplayer is unavailable or
        exits with a non-zero status.
        """
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first; close the devnull handle instead of leaking it.
        try:
            with open(os.path.devnull, 'w') as devnull:
                subprocess.call(['mplayer', '-h'], stdout=devnull, stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'mplayer exited with code %d' % retval)
            return False
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
new file mode 100644 (file)
index 0000000..b165e39
--- /dev/null
@@ -0,0 +1,178 @@
+import os
+import re
+import subprocess
+import sys
+import time
+
+from .common import FileDownloader
+from ..utils import (
+    encodeFilename,
+    format_bytes,
+)
+
+
class RtmpFD(FileDownloader):
    """Download RTMP streams by driving the external rtmpdump program."""

    def real_download(self, filename, info_dict):
        """Download the RTMP stream described by *info_dict* to *filename*.

        rtmpdump exit code 2 signals a resumable interruption; when not in
        test mode the download is retried with ``--resume`` until the file
        stops growing. Returns True on success, False on failure.
        """
        def run_rtmpdump(args):
            # Run rtmpdump, scraping its stderr for progress information.
            start = time.time()
            resume_percent = None
            resume_downloaded_data_len = None
            proc = subprocess.Popen(args, stderr=subprocess.PIPE)
            cursor_in_new_line = True
            proc_stderr_closed = False
            while not proc_stderr_closed:
                # read line from stderr (rtmpdump separates updates with \r)
                line = u''
                while True:
                    char = proc.stderr.read(1)
                    if not char:
                        proc_stderr_closed = True
                        break
                    if char in [b'\r', b'\n']:
                        break
                    line += char.decode('ascii', 'replace')
                if not line:
                    # proc_stderr_closed is True
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1))*1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        # Extrapolate the total size from the reported percentage.
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = u'~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'speed': speed,
                    })
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1))*1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'speed': speed,
                        })
                    elif self.params.get('verbose', False):
                        if not cursor_in_new_line:
                            self.to_screen(u'')
                        cursor_in_new_line = True
                        self.to_screen(u'[rtmpdump] '+line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen(u'')
            return proc.returncode

        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        live = info_dict.get('rtmp_live', False)
        conn = info_dict.get('rtmp_conn', None)

        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        test = self.params.get('test', False)

        # Check for rtmpdump first; close the devnull handle instead of leaking it.
        try:
            with open(os.path.devnull, 'w') as devnull:
                subprocess.call(['rtmpdump', '-h'], stdout=devnull, stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            # BUG FIX: pass the tc_url value; previously the stream url was
            # passed here, making the option a no-op at best.
            basic_args += ['--tcUrl', tc_url]
        if test:
            basic_args += ['--stop', '1']
        if live:
            basic_args += ['--live']
        if conn:
            basic_args += ['--conn', conn]
        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]

        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
            # on Python 2.x
            # See http://stackoverflow.com/a/9951851/35070
            subprocess_encoding = sys.getfilesystemencoding()
            args = [a.encode(subprocess_encoding, 'ignore') for a in args]
        else:
            subprocess_encoding = None

        if self.params.get('verbose', False):
            if subprocess_encoding:
                str_args = [
                    a.decode(subprocess_encoding) if isinstance(a, bytes) else a
                    for a in args]
            else:
                str_args = args
            try:
                import pipes
                # Fixed: quote the lambda's own argument; the parameter used
                # to shadow and ignore it in favour of the closed-over list.
                shell_quote = lambda quote_args: ' '.join(map(pipes.quote, quote_args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))

        retval = run_rtmpdump(args)

        while (retval == 2 or retval == 1) and not test:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0 or (test and retval == 2):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'rtmpdump exited with code %d' % retval)
            return False
index a39a1e2f49803161913442236244b1910d27755c..d66f7b02641437bd97ccd6c9aa9b3ef7211d3c9b 100644 (file)
@@ -28,6 +28,7 @@ from .channel9 import Channel9IE
 from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
 from .clipsyndicate import ClipsyndicateIE
 from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
 from .clipsyndicate import ClipsyndicateIE
+from .cmt import CMTIE
 from .cnn import CNNIE
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .cnn import CNNIE
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
@@ -51,6 +52,7 @@ from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
 from .escapist import EscapistIE
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
 from .escapist import EscapistIE
+from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .exfm import ExfmIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
@@ -60,11 +62,13 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .franceinter import FranceInterIE
 from .francetv import (
     PluzzIE,
     FranceTvInfoIE,
     FranceTVIE,
 from .francetv import (
     PluzzIE,
     FranceTvInfoIE,
     FranceTVIE,
-    GenerationQuoiIE
+    GenerationQuoiIE,
+    CultureboxIE,
 )
 from .freesound import FreesoundIE
 from .funnyordie import FunnyOrDieIE
 )
 from .freesound import FreesoundIE
 from .funnyordie import FunnyOrDieIE
@@ -79,7 +83,10 @@ from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .hypem import HypemIE
 from .ign import IGNIE, OneUPIE
 from .howcast import HowcastIE
 from .hypem import HypemIE
 from .ign import IGNIE, OneUPIE
-from .imdb import ImdbIE
+from .imdb import (
+    ImdbIE,
+    ImdbListIE
+)
 from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE
 from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE
@@ -91,17 +98,25 @@ from .ivi import (
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
+from .jpopsukitv import JpopsukiIE
 from .kankan import KankanIE
 from .keezmovies import KeezMoviesIE
 from .kankan import KankanIE
 from .keezmovies import KeezMoviesIE
+from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
 from .livestream import LivestreamIE, LivestreamOriginalIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
 from .livestream import LivestreamIE, LivestreamOriginalIE
+from .lynda import (
+    LyndaIE,
+    LyndaCourseIE
+)
+from .macgamestore import MacGameStoreIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
+from .mpora import MporaIE
 from .mofosex import MofosexIE
 from .mtv import MTVIE
 from .muzu import MuzuTVIE
 from .mofosex import MofosexIE
 from .mtv import MTVIE
 from .muzu import MuzuTVIE
@@ -116,6 +131,7 @@ from .newgrounds import NewgroundsIE
 from .nhl import NHLIE, NHLVideocenterIE
 from .niconico import NiconicoIE
 from .ninegag import NineGagIE
 from .nhl import NHLIE, NHLVideocenterIE
 from .niconico import NiconicoIE
 from .ninegag import NineGagIE
+from .novamov import NovamovIE
 from .nowvideo import NowVideoIE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
 from .nowvideo import NowVideoIE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
@@ -189,6 +205,7 @@ from .vimeo import (
     VimeoUserIE,
     VimeoAlbumIE,
     VimeoGroupsIE,
     VimeoUserIE,
     VimeoAlbumIE,
     VimeoGroupsIE,
+    VimeoReviewIE,
 )
 from .vine import VineIE
 from .viki import VikiIE
 )
 from .vine import VineIE
 from .viki import VikiIE
index ac05f824610a1385b3d2403c3a91ffa8ccc45e8a..72f81d01a4976767033ea236eff4d8a5e2e43d33 100644 (file)
@@ -1,3 +1,4 @@
+from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
@@ -5,7 +6,7 @@ from .common import InfoExtractor
 
 class AcademicEarthCourseIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'
 
 class AcademicEarthCourseIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'
-    IE_NAME = u'AcademicEarth:Course'
+    IE_NAME = 'AcademicEarth:Course'
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
index ef5644aa54fe28002dc4d8c76308941c264252e3..922cede056690bac963cdb2f896eb7b9254680af 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -11,46 +13,46 @@ from ..utils import (
 class AppleTrailersIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
     _TEST = {
 class AppleTrailersIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
     _TEST = {
-        u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/",
-        u"playlist": [
+        "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
+        "playlist": [
             {
             {
-                u"file": u"manofsteel-trailer4.mov",
-                u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8",
-                u"info_dict": {
-                    u"duration": 111,
-                    u"title": u"Trailer 4",
-                    u"upload_date": u"20130523",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer4.mov",
+                "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
+                "info_dict": {
+                    "duration": 111,
+                    "title": "Trailer 4",
+                    "upload_date": "20130523",
+                    "uploader_id": "wb",
                 },
             },
             {
                 },
             },
             {
-                u"file": u"manofsteel-trailer3.mov",
-                u"md5": u"b8017b7131b721fb4e8d6f49e1df908c",
-                u"info_dict": {
-                    u"duration": 182,
-                    u"title": u"Trailer 3",
-                    u"upload_date": u"20130417",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer3.mov",
+                "md5": "b8017b7131b721fb4e8d6f49e1df908c",
+                "info_dict": {
+                    "duration": 182,
+                    "title": "Trailer 3",
+                    "upload_date": "20130417",
+                    "uploader_id": "wb",
                 },
             },
             {
                 },
             },
             {
-                u"file": u"manofsteel-trailer.mov",
-                u"md5": u"d0f1e1150989b9924679b441f3404d48",
-                u"info_dict": {
-                    u"duration": 148,
-                    u"title": u"Trailer",
-                    u"upload_date": u"20121212",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-trailer.mov",
+                "md5": "d0f1e1150989b9924679b441f3404d48",
+                "info_dict": {
+                    "duration": 148,
+                    "title": "Trailer",
+                    "upload_date": "20121212",
+                    "uploader_id": "wb",
                 },
             },
             {
                 },
             },
             {
-                u"file": u"manofsteel-teaser.mov",
-                u"md5": u"5fe08795b943eb2e757fa95cb6def1cb",
-                u"info_dict": {
-                    u"duration": 93,
-                    u"title": u"Teaser",
-                    u"upload_date": u"20120721",
-                    u"uploader_id": u"wb",
+                "file": "manofsteel-teaser.mov",
+                "md5": "5fe08795b943eb2e757fa95cb6def1cb",
+                "info_dict": {
+                    "duration": 93,
+                    "title": "Teaser",
+                    "upload_date": "20120721",
+                    "uploader_id": "wb",
                 },
             }
         ]
                 },
             }
         ]
@@ -110,7 +112,8 @@ class AppleTrailersIE(InfoExtractor):
                     'width': format['width'],
                     'height': int(format['height']),
                 })
                     'width': format['width'],
                     'height': int(format['height']),
                 })
-            formats = sorted(formats, key=lambda f: (f['height'], f['width']))
+
+            self._sort_formats(formats)
 
             playlist.append({
                 '_type': 'video',
 
             playlist.append({
                 '_type': 'video',
index 8bb546410f7a7486bdaa964bc724cf2c501e8851..34ce8429b121261784a1645c28e2a33cb76bcacb 100644 (file)
@@ -1,9 +1,10 @@
+from __future__ import unicode_literals
+
 import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
 import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     unified_strdate,
 )
 
     unified_strdate,
 )
 
@@ -13,23 +14,22 @@ class ArchiveOrgIE(InfoExtractor):
     IE_DESC = 'archive.org videos'
     _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
     _TEST = {
     IE_DESC = 'archive.org videos'
     _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
     _TEST = {
-        u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
-        u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
-        u'md5': u'8af1d4cf447933ed3c7f4871162602db',
-        u'info_dict': {
-            u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
-            u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
-            u"upload_date": u"19681210",
-            u"uploader": u"SRI International"
+        "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
+        'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
+        'md5': '8af1d4cf447933ed3c7f4871162602db',
+        'info_dict': {
+            "title": "1968 Demo - FJCC Conference Presentation Reel #1",
+            "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
+            "upload_date": "19681210",
+            "uploader": "SRI International"
         }
     }
 
         }
     }
 
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
-        json_url = url + (u'?' if u'?' in url else '&') + u'output=json'
+        json_url = url + ('?' if '?' in url else '&') + 'output=json'
         json_data = self._download_webpage(json_url, video_id)
         data = json.loads(json_data)
 
         json_data = self._download_webpage(json_url, video_id)
         data = json.loads(json_data)
 
@@ -38,16 +38,16 @@ class ArchiveOrgIE(InfoExtractor):
         uploader = data['metadata']['creator'][0]
         upload_date = unified_strdate(data['metadata']['date'][0])
 
         uploader = data['metadata']['creator'][0]
         upload_date = unified_strdate(data['metadata']['date'][0])
 
-        formats = [{
+        formats = [
+            {
                 'format': fdata['format'],
                 'url': 'http://' + data['server'] + data['dir'] + fn,
                 'file_size': int(fdata['size']),
             }
                 'format': fdata['format'],
                 'url': 'http://' + data['server'] + data['dir'] + fn,
                 'file_size': int(fdata['size']),
             }
-            for fn,fdata in data['files'].items()
+            for fn, fdata in data['files'].items()
             if 'Video' in fdata['format']]
             if 'Video' in fdata['format']]
-        formats.sort(key=lambda fdata: fdata['file_size'])
-        for f in formats:
-            f['ext'] = determine_ext(f['url'])
+
+        self._sort_formats(formats)
 
         return {
             '_type': 'video',
 
         return {
             '_type': 'video',
index 9254fbfe0de5cb9138cb50deeb4719f94c18f92e..7cf3785ac687a4fae01a53589af26fb11054463d 100644 (file)
@@ -1,4 +1,6 @@
 # encoding: utf-8
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -22,7 +24,7 @@ class ArteTvIE(InfoExtractor):
     _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
     _LIVE_URL = r'index-[0-9]+\.html$'
 
     _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
     _LIVE_URL = r'index-[0-9]+\.html$'
 
-    IE_NAME = u'arte.tv'
+    IE_NAME = 'arte.tv'
 
     @classmethod
     def suitable(cls, url):
 
     @classmethod
     def suitable(cls, url):
@@ -37,7 +39,7 @@ class ArteTvIE(InfoExtractor):
     #         r'src="(.*?/videothek_js.*?\.js)',
     #         0,
     #         [
     #         r'src="(.*?/videothek_js.*?\.js)',
     #         0,
     #         [
-    #             (1, 'url', u'Invalid URL: %s' % url)
+    #             (1, 'url', 'Invalid URL: %s' % url)
     #         ]
     #     )
     #     http_host = url.split('/')[2]
     #         ]
     #     )
     #     http_host = url.split('/')[2]
@@ -49,12 +51,12 @@ class ArteTvIE(InfoExtractor):
     #             '(rtmp://.*?)\'',
     #         re.DOTALL,
     #         [
     #             '(rtmp://.*?)\'',
     #         re.DOTALL,
     #         [
-    #             (1, 'path',   u'could not extract video path: %s' % url),
-    #             (2, 'player', u'could not extract video player: %s' % url),
-    #             (3, 'url',    u'could not extract video url: %s' % url)
+    #             (1, 'path',   'could not extract video path: %s' % url),
+    #             (2, 'player', 'could not extract video player: %s' % url),
+    #             (3, 'url',    'could not extract video url: %s' % url)
     #         ]
     #     )
     #         ]
     #     )
-    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
+    #     video_url = '%s/%s' % (info.get('url'), info.get('path'))
 
     def _real_extract(self, url):
         mobj = re.match(self._VIDEOS_URL, url)
 
     def _real_extract(self, url):
         mobj = re.match(self._VIDEOS_URL, url)
@@ -107,9 +109,9 @@ class ArteTvIE(InfoExtractor):
     def _extract_liveweb(self, url, name, lang):
         """Extract form http://liveweb.arte.tv/"""
         webpage = self._download_webpage(url, name)
     def _extract_liveweb(self, url, name, lang):
         """Extract form http://liveweb.arte.tv/"""
         webpage = self._download_webpage(url, name)
-        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
+        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id')
         config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
         config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
-                                            video_id, u'Downloading information')
+                                            video_id, 'Downloading information')
         event_doc = config_doc.find('event')
         url_node = event_doc.find('video').find('urlHd')
         if url_node is None:
         event_doc = config_doc.find('event')
         url_node = event_doc.find('video').find('urlHd')
         if url_node is None:
@@ -124,7 +126,7 @@ class ArteTvIE(InfoExtractor):
 
 
 class ArteTVPlus7IE(InfoExtractor):
 
 
 class ArteTVPlus7IE(InfoExtractor):
-    IE_NAME = u'arte.tv:+7'
+    IE_NAME = 'arte.tv:+7'
     _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
 
     @classmethod
     _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
 
     @classmethod
@@ -207,7 +209,7 @@ class ArteTVPlus7IE(InfoExtractor):
             if bitrate is not None:
                 quality += '-%d' % bitrate
             if format_info.get('versionCode') is not None:
             if bitrate is not None:
                 quality += '-%d' % bitrate
             if format_info.get('versionCode') is not None:
-                format_id = u'%s-%s' % (quality, format_info['versionCode'])
+                format_id = '%s-%s' % (quality, format_info['versionCode'])
             else:
                 format_id = quality
             info = {
             else:
                 format_id = quality
             info = {
@@ -216,7 +218,7 @@ class ArteTVPlus7IE(InfoExtractor):
                 'width': format_info.get('width'),
                 'height': height,
             }
                 'width': format_info.get('width'),
                 'height': height,
             }
-            if format_info['mediaType'] == u'rtmp':
+            if format_info['mediaType'] == 'rtmp':
                 info['url'] = format_info['streamer']
                 info['play_path'] = 'mp4:' + format_info['url']
                 info['ext'] = 'flv'
                 info['url'] = format_info['streamer']
                 info['play_path'] = 'mp4:' + format_info['url']
                 info['ext'] = 'flv'
@@ -231,27 +233,27 @@ class ArteTVPlus7IE(InfoExtractor):
 
 # It also uses the arte_vp_url url from the webpage to extract the information
 class ArteTVCreativeIE(ArteTVPlus7IE):
 
 # It also uses the arte_vp_url url from the webpage to extract the information
 class ArteTVCreativeIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:creative'
+    IE_NAME = 'arte.tv:creative'
     _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
 
     _TEST = {
     _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
 
     _TEST = {
-        u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
-        u'file': u'050489-002.mp4',
-        u'info_dict': {
-            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
+        'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
+        'file': '050489-002.mp4',
+        'info_dict': {
+            'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design',
         },
     }
 
 
 class ArteTVFutureIE(ArteTVPlus7IE):
         },
     }
 
 
 class ArteTVFutureIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:future'
+    IE_NAME = 'arte.tv:future'
     _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
 
     _TEST = {
     _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
-        u'file': u'050940-003.mp4',
-        u'info_dict': {
-            u'title': u'Les champignons au secours de la planète',
+        'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
+        'file': '050940-003.mp4',
+        'info_dict': {
+            'title': 'Les champignons au secours de la planète',
         },
     }
 
         },
     }
 
@@ -263,7 +265,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):
 
 
 class ArteTVDDCIE(ArteTVPlus7IE):
 
 
 class ArteTVDDCIE(ArteTVPlus7IE):
-    IE_NAME = u'arte.tv:ddc'
+    IE_NAME = 'arte.tv:ddc'
     _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
 
     def _real_extract(self, url):
     _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
 
     def _real_extract(self, url):
index bcccc0b7a54c8b03b84a3e1303672509577faa66..c6f30e62616c09f50f8826d4d30cc6069cf3367f 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
@@ -7,13 +9,14 @@ from ..utils import (
     ExtractorError,
 )
 
     ExtractorError,
 )
 
+
 class AUEngineIE(InfoExtractor):
     _TEST = {
 class AUEngineIE(InfoExtractor):
     _TEST = {
-        u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
-        u'file': u'lfvlytY6.mp4',
-        u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f',
-        u'info_dict': {
-            u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]"
+        'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
+        'file': 'lfvlytY6.mp4',
+        'md5': '48972bdbcf1a3a2f5533e62425b41d4f',
+        'info_dict': {
+            'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]'
         }
     }
     _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?'
         }
     }
     _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?'
@@ -23,7 +26,7 @@ class AUEngineIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
-                webpage, u'title')
+                webpage, 'title')
         title = title.strip()
         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
         links = map(compat_urllib_parse.unquote, links)
         title = title.strip()
         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
         links = map(compat_urllib_parse.unquote, links)
@@ -37,7 +40,7 @@ class AUEngineIE(InfoExtractor):
                 video_url = link
         if not video_url:
             raise ExtractorError(u'Could not find video URL')
                 video_url = link
         if not video_url:
             raise ExtractorError(u'Could not find video URL')
-        ext = u'.' + determine_ext(video_url)
+        ext = '.' + determine_ext(video_url)
         if ext == title[-len(ext):]:
             title = title[:-len(ext)]
 
         if ext == title[-len(ext):]:
             title = title[:-len(ext)]
 
index d48c0c38d0ecfc787ce364e015d5a53260b922d4..ccd31c4c7093d54e86df50a42600d08e12e55005 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 import itertools
 import re
 import json
 import itertools
@@ -9,26 +11,26 @@ from ..utils import (
 
 
 class BambuserIE(InfoExtractor):
 
 
 class BambuserIE(InfoExtractor):
-    IE_NAME = u'bambuser'
+    IE_NAME = 'bambuser'
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
 
     _TEST = {
     _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
     _API_KEY = '005f64509e19a868399060af746a00aa'
 
     _TEST = {
-        u'url': u'http://bambuser.com/v/4050584',
+        'url': 'http://bambuser.com/v/4050584',
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
         # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
-        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',
-        u'info_dict': {
-            u'id': u'4050584',
-            u'ext': u'flv',
-            u'title': u'Education engineering days - lightning talks',
-            u'duration': 3741,
-            u'uploader': u'pixelversity',
-            u'uploader_id': u'344706',
+        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
+        'info_dict': {
+            'id': '4050584',
+            'ext': 'flv',
+            'title': 'Education engineering days - lightning talks',
+            'duration': 3741,
+            'uploader': 'pixelversity',
+            'uploader_id': '344706',
         },
         },
-        u'params': {
+        'params': {
             # It doesn't respect the 'Range' header, it would download the whole video
             # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
             # It doesn't respect the 'Range' header, it would download the whole video
             # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
-            u'skip_download': True,
+            'skip_download': True,
         },
     }
 
         },
     }
 
@@ -53,7 +55,7 @@ class BambuserIE(InfoExtractor):
 
 
 class BambuserChannelIE(InfoExtractor):
 
 
 class BambuserChannelIE(InfoExtractor):
-    IE_NAME = u'bambuser:channel'
+    IE_NAME = 'bambuser:channel'
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
     _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
     # The maximum number we can get with each request
     _STEP = 50
@@ -72,7 +74,7 @@ class BambuserChannelIE(InfoExtractor):
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
             info_json = self._download_webpage(req, user,
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
             info_json = self._download_webpage(req, user,
-                u'Downloading page %d' % i)
+                'Downloading page %d' % i)
             results = json.loads(info_json)['result']
             if len(results) == 0:
                 break
             results = json.loads(info_json)['result']
             if len(results) == 0:
                 break
index 3a32c14c598dd2da14841fe68c1cb59582f30799..886b0dfabb7537ebca8a13ce7e4d6d68b9966a4c 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import json
 import re
 
 import json
 import re
 
@@ -10,16 +12,16 @@ from ..utils import (
 
 
 class BandcampIE(InfoExtractor):
 
 
 class BandcampIE(InfoExtractor):
-    IE_NAME = u'Bandcamp'
     _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
     _TESTS = [{
     _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
     _TESTS = [{
-        u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
-        u'file': u'1812978515.mp3',
-        u'md5': u'cdeb30cdae1921719a3cbcab696ef53c',
-        u'info_dict': {
-            u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"
+        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
+        'file': '1812978515.mp3',
+        'md5': 'c557841d5e50261777a6585648adf439',
+        'info_dict': {
+            "title": "youtube-dl  \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
+            "duration": 10,
         },
         },
-        u'skip': u'There is a limit of 200 free downloads / month for the test song'
+        '_skip': 'There is a limit of 200 free downloads / month for the test song'
     }]
 
     def _real_extract(self, url):
     }]
 
     def _real_extract(self, url):
@@ -30,85 +32,98 @@ class BandcampIE(InfoExtractor):
         m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
         if m_download is None:
             m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
         m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
         if m_download is None:
             m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
-        if m_trackinfo:
-            json_code = m_trackinfo.group(1)
-            data = json.loads(json_code)
-
-            for d in data:
-                formats = [{
-                    'format_id': 'format_id',
-                    'url': format_url,
-                    'ext': format_id.partition('-')[0]
-                } for format_id, format_url in sorted(d['file'].items())]
+            if m_trackinfo:
+                json_code = m_trackinfo.group(1)
+                data = json.loads(json_code)
+                d = data[0]
+
+                duration = int(round(d['duration']))
+                formats = []
+                for format_id, format_url in d['file'].items():
+                    ext, _, abr_str = format_id.partition('-')
+
+                    formats.append({
+                        'format_id': format_id,
+                        'url': format_url,
+                        'ext': format_id.partition('-')[0],
+                        'vcodec': 'none',
+                        'acodec': format_id.partition('-')[0],
+                        'abr': int(format_id.partition('-')[2]),
+                    })
+
+                self._sort_formats(formats)
+
                 return {
                     'id': compat_str(d['id']),
                     'title': d['title'],
                     'formats': formats,
                 return {
                     'id': compat_str(d['id']),
                     'title': d['title'],
                     'formats': formats,
+                    'duration': duration,
                 }
                 }
-        else:
-            raise ExtractorError(u'No free songs found')
+            else:
+                raise ExtractorError('No free songs found')
 
         download_link = m_download.group(1)
 
         download_link = m_download.group(1)
-        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', 
-                       webpage, re.MULTILINE|re.DOTALL).group('id')
+        video_id = re.search(
+            r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
+            webpage, re.MULTILINE | re.DOTALL).group('id')
 
 
-        download_webpage = self._download_webpage(download_link, id,
+        download_webpage = self._download_webpage(download_link, video_id,
                                                   'Downloading free downloads page')
         # We get the dictionary of the track from some javascrip code
         info = re.search(r'items: (.*?),$',
                          download_webpage, re.MULTILINE).group(1)
         info = json.loads(info)[0]
         # We pick mp3-320 for now, until format selection can be easily implemented.
                                                   'Downloading free downloads page')
         # We get the dictionary of the track from some javascrip code
         info = re.search(r'items: (.*?),$',
                          download_webpage, re.MULTILINE).group(1)
         info = json.loads(info)[0]
         # We pick mp3-320 for now, until format selection can be easily implemented.
-        mp3_info = info[u'downloads'][u'mp3-320']
+        mp3_info = info['downloads']['mp3-320']
         # If we try to use this url it says the link has expired
         # If we try to use this url it says the link has expired
-        initial_url = mp3_info[u'url']
+        initial_url = mp3_info['url']
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
         #We build the url we will use to get the final track url
         # This url is build in Bandcamp in the script download_bunde_*.js
         re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
         m_url = re.match(re_url, initial_url)
         #We build the url we will use to get the final track url
         # This url is build in Bandcamp in the script download_bunde_*.js
-        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
-        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
+        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
+        final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
         # If we could correctly generate the .rand field the url would be
         #in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
         # If we could correctly generate the .rand field the url would be
         #in the "download_url" key
         final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
 
-        track_info = {'id':id,
-                      'title' : info[u'title'],
-                      'ext' :   'mp3',
-                      'url' :   final_url,
-                      'thumbnail' : info[u'thumb_url'],
-                      'uploader' :  info[u'artist']
-                      }
-
-        return [track_info]
+        return {
+            'id': video_id,
+            'title': info['title'],
+            'ext': 'mp3',
+            'vcodec': 'none',
+            'url': final_url,
+            'thumbnail': info.get('thumb_url'),
+            'uploader': info.get('artist'),
+        }
 
 
 class BandcampAlbumIE(InfoExtractor):
 
 
 class BandcampAlbumIE(InfoExtractor):
-    IE_NAME = u'Bandcamp:album'
+    IE_NAME = 'Bandcamp:album'
     _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'
 
     _TEST = {
     _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'
 
     _TEST = {
-        u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
-        u'playlist': [
+        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
+        'playlist': [
             {
             {
-                u'file': u'1353101989.mp3',
-                u'md5': u'39bc1eded3476e927c724321ddf116cf',
-                u'info_dict': {
-                    u'title': u'Intro',
+                'file': '1353101989.mp3',
+                'md5': '39bc1eded3476e927c724321ddf116cf',
+                'info_dict': {
+                    'title': 'Intro',
                 }
             },
             {
                 }
             },
             {
-                u'file': u'38097443.mp3',
-                u'md5': u'1a2c32e2691474643e912cc6cd4bffaa',
-                u'info_dict': {
-                    u'title': u'Kero One - Keep It Alive (Blazo remix)',
+                'file': '38097443.mp3',
+                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
+                'info_dict': {
+                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                 }
             },
         ],
                 }
             },
         ],
-        u'params': {
-            u'playlistend': 2
+        'params': {
+            'playlistend': 2
         },
         },
-        u'skip': u'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
+        'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
     }
 
     def _real_extract(self, url):
     }
 
     def _real_extract(self, url):
@@ -117,11 +132,11 @@ class BandcampAlbumIE(InfoExtractor):
         webpage = self._download_webpage(url, title)
         tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
         if not tracks_paths:
         webpage = self._download_webpage(url, title)
         tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
         if not tracks_paths:
-            raise ExtractorError(u'The page doesn\'t contain any track')
+            raise ExtractorError('The page doesn\'t contain any tracks')
         entries = [
             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
             for t_path in tracks_paths]
         entries = [
             self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
             for t_path in tracks_paths]
-        title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title')
+        title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title')
         return {
             '_type': 'playlist',
             'title': title,
         return {
             '_type': 'playlist',
             'title': title,
index 144ce64ccacd35abbbf51392e192dff8a1ca1649..96408e4e093ba6b27b4da54248d436d162d3c40c 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import datetime
 import json
 import re
 import datetime
 import json
 import re
@@ -10,19 +12,19 @@ from ..utils import (
 
 class BlinkxIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
 
 class BlinkxIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
-    _IE_NAME = u'blinkx'
+    IE_NAME = 'blinkx'
 
     _TEST = {
 
     _TEST = {
-        u'url': u'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
-        u'file': u'8aQUy7GV.mp4',
-        u'md5': u'2e9a07364af40163a908edbf10bb2492',
-        u'info_dict': {
-            u"title": u"Police Car Rolls Away",
-            u"uploader": u"stupidvideos.com",
-            u"upload_date": u"20131215",
-            u"description": u"A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
-            u"duration": 14.886,
-            u"thumbnails": [{
+        'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
+        'file': '8aQUy7GV.mp4',
+        'md5': '2e9a07364af40163a908edbf10bb2492',
+        'info_dict': {
+            "title": "Police Car Rolls Away",
+            "uploader": "stupidvideos.com",
+            "upload_date": "20131215",
+            "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
+            "duration": 14.886,
+            "thumbnails": [{
                 "width": 100,
                 "height": 76,
                 "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
                 "width": 100,
                 "height": 76,
                 "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
@@ -30,17 +32,17 @@ class BlinkxIE(InfoExtractor):
         },
     }
 
         },
     }
 
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
+    def _real_extract(self, rl):
+        m = re.match(self._VALID_URL, rl)
         video_id = m.group('id')
         display_id = video_id[:8]
 
         api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' +
         video_id = m.group('id')
         display_id = video_id[:8]
 
         api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' +
-                   u'video=%s' % video_id)
+                   'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
         dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
-        upload_date = dt.strftime('%Y%m%d')
+        pload_date = dt.strftime('%Y%m%d')
 
         duration = None
         thumbnails = []
 
         duration = None
         thumbnails = []
@@ -61,9 +63,10 @@ class BlinkxIE(InfoExtractor):
             elif m['type'] in ('flv', 'mp4'):
                 vcodec = remove_start(m['vcodec'], 'ff')
                 acodec = remove_start(m['acodec'], 'ff')
             elif m['type'] in ('flv', 'mp4'):
                 vcodec = remove_start(m['vcodec'], 'ff')
                 acodec = remove_start(m['acodec'], 'ff')
+                tbr = (int(m['vbr']) + int(m['abr'])) // 1000
                 format_id = (u'%s-%sk-%s' %
                              (vcodec,
                 format_id = (u'%s-%sk-%s' %
                              (vcodec,
-                              (int(m['vbr']) + int(m['abr'])) // 1000,
+                              tbr,
                               m['w']))
                 formats.append({
                     'format_id': format_id,
                               m['w']))
                 formats.append({
                     'format_id': format_id,
@@ -72,10 +75,12 @@ class BlinkxIE(InfoExtractor):
                     'acodec': acodec,
                     'abr': int(m['abr']) // 1000,
                     'vbr': int(m['vbr']) // 1000,
                     'acodec': acodec,
                     'abr': int(m['abr']) // 1000,
                     'vbr': int(m['vbr']) // 1000,
+                    'tbr': tbr,
                     'width': int(m['w']),
                     'height': int(m['h']),
                 })
                     'width': int(m['w']),
                     'height': int(m['h']),
                 })
-        formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr']))
+
+        self._sort_formats(formats)
 
         return {
             'id': display_id,
 
         return {
             'id': display_id,
@@ -83,7 +88,7 @@ class BlinkxIE(InfoExtractor):
             'title': data['title'],
             'formats': formats,
             'uploader': data['channel_name'],
             'title': data['title'],
             'formats': formats,
             'uploader': data['channel_name'],
-            'upload_date': upload_date,
+            'upload_date': pload_date,
             'description': data.get('description'),
             'thumbnails': thumbnails,
             'duration': duration,
             'description': data.get('description'),
             'thumbnails': thumbnails,
             'duration': duration,
index 5e33a69df42fcbaa1b17f1737d66f5841ca50318..3ce9b53242eb0948c0b9d5fe3b572d577e385bea 100644 (file)
@@ -1,16 +1,15 @@
+from __future__ import unicode_literals
+
 import datetime
 import json
 import datetime
 import json
-import os
 import re
 import socket
 
 from .common import InfoExtractor
 from ..utils import (
     compat_http_client,
 import re
 import socket
 
 from .common import InfoExtractor
 from ..utils import (
     compat_http_client,
-    compat_parse_qs,
     compat_str,
     compat_urllib_error,
     compat_str,
     compat_urllib_error,
-    compat_urllib_parse_urlparse,
     compat_urllib_request,
 
     ExtractorError,
     compat_urllib_request,
 
     ExtractorError,
@@ -22,42 +21,35 @@ class BlipTVIE(InfoExtractor):
     """Information extractor for blip.tv"""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
     """Information extractor for blip.tv"""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
-    _URL_EXT = r'^.*\.([a-z0-9]+)$'
-    IE_NAME = u'blip.tv'
+
     _TEST = {
     _TEST = {
-        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
-        u'file': u'5779306.m4v',
-        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
-        u'info_dict': {
-            u"upload_date": u"20111205", 
-            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596", 
-            u"uploader": u"Comic Book Resources - CBR TV", 
-            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
+        'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
+        'file': '5779306.mov',
+        'md5': 'c6934ad0b6acf2bd920720ec888eb812',
+        'info_dict': {
+            'upload_date': '20111205',
+            'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
+            'uploader': 'Comic Book Resources - CBR TV',
+            'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
         }
     }
 
     def report_direct_download(self, title):
         """Report information extraction."""
         }
     }
 
     def report_direct_download(self, title):
         """Report information extraction."""
-        self.to_screen(u'%s: Direct download detected' % title)
+        self.to_screen('%s: Direct download detected' % title)
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
 
         # See https://github.com/rg3/youtube-dl/issues/857
 
         # See https://github.com/rg3/youtube-dl/issues/857
-        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
-        if api_mobj is not None:
-            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
-        urlp = compat_urllib_parse_urlparse(url)
-        if urlp.path.startswith('/play/'):
-            response = self._request_webpage(url, None, False)
-            redirecturl = response.geturl()
-            rurlp = compat_urllib_parse_urlparse(redirecturl)
-            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
-            url = 'http://blip.tv/a/a-' + file_id
-            return self._real_extract(url)
-
+        embed_mobj = re.search(r'^(?:https?://)?(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url)
+        if embed_mobj:
+            info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1)
+            info_page = self._download_webpage(info_url, embed_mobj.group(1))
+            video_id = self._search_regex(r'data-episode-id="(\d+)', info_page,  'video_id')
+            return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')
 
         if '?' in url:
             cchar = '&'
 
         if '?' in url:
             cchar = '&'
@@ -67,67 +59,55 @@ class BlipTVIE(InfoExtractor):
         request = compat_urllib_request.Request(json_url)
         request.add_header('User-Agent', 'iTunes/10.6.1')
         self.report_extraction(mobj.group(1))
         request = compat_urllib_request.Request(json_url)
         request.add_header('User-Agent', 'iTunes/10.6.1')
         self.report_extraction(mobj.group(1))
-        info = None
         urlh = self._request_webpage(request, None, False,
         urlh = self._request_webpage(request, None, False,
-            u'unable to download video info webpage')
-        if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
-            basename = url.split('/')[-1]
-            title,ext = os.path.splitext(basename)
-            title = title.decode('UTF-8')
-            ext = ext.replace('.', '')
-            self.report_direct_download(title)
-            info = {
-                'id': title,
-                'url': url,
-                'uploader': None,
-                'upload_date': None,
-                'title': title,
-                'ext': ext,
-                'urlhandle': urlh
+            'unable to download video info webpage')
+
+        try:
+            json_code_bytes = urlh.read()
+            json_code = json_code_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError('Unable to read video info webpage: %s' % compat_str(err))
+
+        try:
+            json_data = json.loads(json_code)
+            if 'Post' in json_data:
+                data = json_data['Post']
+            else:
+                data = json_data
+
+            upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+            formats = []
+            if 'additionalMedia' in data:
+                for f in sorted(data['additionalMedia'], key=lambda f: int(f['media_height'])):
+                    if not int(f['media_width']): # filter m3u8
+                        continue
+                    formats.append({
+                        'url': f['url'],
+                        'format_id': f['role'],
+                        'width': int(f['media_width']),
+                        'height': int(f['media_height']),
+                    })
+            else:
+                formats.append({
+                    'url': data['media']['url'],
+                    'width': int(data['media']['width']),
+                    'height': int(data['media']['height']),
+                })
+
+            self._sort_formats(formats)
+
+            return {
+                'id': compat_str(data['item_id']),
+                'uploader': data['display_name'],
+                'upload_date': upload_date,
+                'title': data['title'],
+                'thumbnail': data['thumbnailUrl'],
+                'description': data['description'],
+                'user_agent': 'iTunes/10.6.1',
+                'formats': formats,
             }
             }
-        if info is None: # Regular URL
-            try:
-                json_code_bytes = urlh.read()
-                json_code = json_code_bytes.decode('utf-8')
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
-
-            try:
-                json_data = json.loads(json_code)
-                if 'Post' in json_data:
-                    data = json_data['Post']
-                else:
-                    data = json_data
-
-                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-                if 'additionalMedia' in data:
-                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
-                    best_format = formats[-1]
-                    video_url = best_format['url']
-                else:
-                    video_url = data['media']['url']
-                umobj = re.match(self._URL_EXT, video_url)
-                if umobj is None:
-                    raise ValueError('Can not determine filename extension')
-                ext = umobj.group(1)
-
-                info = {
-                    'id': compat_str(data['item_id']),
-                    'url': video_url,
-                    'uploader': data['display_name'],
-                    'upload_date': upload_date,
-                    'title': data['title'],
-                    'ext': ext,
-                    'format': data['media']['mimeType'],
-                    'thumbnail': data['thumbnailUrl'],
-                    'description': data['description'],
-                    'player_url': data['embedUrl'],
-                    'user_agent': 'iTunes/10.6.1',
-                }
-            except (ValueError,KeyError) as err:
-                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
-
-        return [info]
+        except (ValueError, KeyError) as err:
+            raise ExtractorError('Unable to parse video information: %s' % repr(err))
 
 
 class BlipTVUserIE(InfoExtractor):
 
 
 class BlipTVUserIE(InfoExtractor):
@@ -135,19 +115,19 @@ class BlipTVUserIE(InfoExtractor):
 
     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
     _PAGE_SIZE = 12
 
     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
     _PAGE_SIZE = 12
-    IE_NAME = u'blip.tv:user'
+    IE_NAME = 'blip.tv:user'
 
     def _real_extract(self, url):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
 
     def _real_extract(self, url):
         # Extract username
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
 
         username = mobj.group(1)
 
         page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
 
 
         username = mobj.group(1)
 
         page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
 
-        page = self._download_webpage(url, username, u'Downloading user page')
+        page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
 
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
 
@@ -163,7 +143,7 @@ class BlipTVUserIE(InfoExtractor):
         while True:
             url = page_base + "&page=" + str(pagenum)
             page = self._download_webpage(url, username,
         while True:
             url = page_base + "&page=" + str(pagenum)
             page = self._download_webpage(url, username,
-                                          u'Downloading video ids from page %d' % pagenum)
+                                          'Downloading video ids from page %d' % pagenum)
 
             # Extract video identifiers
             ids_in_page = []
 
             # Extract video identifiers
             ids_in_page = []
@@ -185,6 +165,6 @@ class BlipTVUserIE(InfoExtractor):
 
             pagenum += 1
 
 
             pagenum += 1
 
-        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+        urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
         return [self.playlist_result(url_entries, playlist_title = username)]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
         return [self.playlist_result(url_entries, playlist_title = username)]
index 755d9c9ef2a093289df91409097320908ea06df7..d18bc7e0ca5c9d5556d60e354e8b0d9f351e89f7 100644 (file)
@@ -1,6 +1,7 @@
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
+from .ooyala import OoyalaIE
 
 
 class BloombergIE(InfoExtractor):
 
 
 class BloombergIE(InfoExtractor):
@@ -23,5 +24,5 @@ class BloombergIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         webpage = self._download_webpage(url, name)
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         webpage = self._download_webpage(url, name)
-        ooyala_url = self._og_search_video_url(webpage)
-        return self.url_result(ooyala_url, ie='Ooyala')
+        ooyala_code = self._search_regex(r'<source src="http://player.ooyala.com/player/[^/]+/([^".]+)', webpage, u'ooyala url')
+        return OoyalaIE._build_url_result(ooyala_code)
index f7f0041c0872f84349d2ee060ef8ada9aed9d6bd..8ac38f4aa71e1fe63e51e1267f4a80919431621f 100644 (file)
@@ -1,4 +1,5 @@
 # encoding: utf-8
 # encoding: utf-8
+from __future__ import unicode_literals
 
 import re
 import json
 
 import re
 import json
@@ -13,6 +14,7 @@ from ..utils import (
     compat_urllib_request,
 
     ExtractorError,
     compat_urllib_request,
 
     ExtractorError,
+    unsmuggle_url,
 )
 
 
 )
 
 
@@ -24,47 +26,47 @@ class BrightcoveIE(InfoExtractor):
     _TESTS = [
         {
             # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
     _TESTS = [
         {
             # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
-            u'file': u'2371591881001.mp4',
-            u'md5': u'5423e113865d26e40624dce2e4b45d95',
-            u'note': u'Test Brightcove downloads and detection in GenericIE',
-            u'info_dict': {
-                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
-                u'uploader': u'8TV',
-                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
+            'file': '2371591881001.mp4',
+            'md5': '5423e113865d26e40624dce2e4b45d95',
+            'note': 'Test Brightcove downloads and detection in GenericIE',
+            'info_dict': {
+                'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
+                'uploader': '8TV',
+                'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
             }
         },
         {
             # From http://medianetwork.oracle.com/video/player/1785452137001
             }
         },
         {
             # From http://medianetwork.oracle.com/video/player/1785452137001
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
-            u'file': u'1785452137001.flv',
-            u'info_dict': {
-                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
-                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
-                u'uploader': u'Oracle',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
+            'file': '1785452137001.flv',
+            'info_dict': {
+                'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
+                'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
+                'uploader': 'Oracle',
             },
         },
         {
             # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
             },
         },
         {
             # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
-            u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
-            u'info_dict': {
-                u'id': u'2750934548001',
-                u'ext': u'mp4',
-                u'title': u'This Bracelet Acts as a Personal Thermostat',
-                u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0',
-                u'uploader': u'Mashable',
+            'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
+            'info_dict': {
+                'id': '2750934548001',
+                'ext': 'mp4',
+                'title': 'This Bracelet Acts as a Personal Thermostat',
+                'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
+                'uploader': 'Mashable',
             },
         },
         {
             # test that the default referer works
             # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
             },
         },
         {
             # test that the default referer works
             # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
-            u'url': u'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
-            u'info_dict': {
-                u'id': u'2878862109001',
-                u'ext': u'mp4',
-                u'title': u'Lost in Motion II',
-                u'description': u'md5:363109c02998fee92ec02211bd8000df',
-                u'uploader': u'National Ballet of Canada',
+            'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
+            'info_dict': {
+                'id': '2878862109001',
+                'ext': 'mp4',
+                'title': 'Lost in Motion II',
+                'description': 'md5:363109c02998fee92ec02211bd8000df',
+                'uploader': 'National Ballet of Canada',
             },
         },
     ]
             },
         },
     ]
@@ -80,13 +82,13 @@ class BrightcoveIE(InfoExtractor):
         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
-        object_str = object_str.replace(u'<--', u'<!--')
+        object_str = object_str.replace('<--', '<!--')
 
         object_doc = xml.etree.ElementTree.fromstring(object_str)
 
         object_doc = xml.etree.ElementTree.fromstring(object_str)
-        assert u'BrightcoveExperience' in object_doc.attrib['class']
-        params = {'flashID': object_doc.attrib['id'],
-                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
-                  }
+        assert 'BrightcoveExperience' in object_doc.attrib['class']
+        params = {
+            'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
+        }
         def find_param(name):
             node = find_xpath_attr(object_doc, './param', 'name', name)
             if node is not None:
         def find_param(name):
             node = find_xpath_attr(object_doc, './param', 'name', name)
             if node is not None:
@@ -120,6 +122,8 @@ class BrightcoveIE(InfoExtractor):
             return None
 
     def _real_extract(self, url):
             return None
 
     def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
+
         # Change the 'videoId' and others field to '@videoPlayer'
         url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
         # Change bckey (used by bcove.me urls) to playerKey
         # Change the 'videoId' and others field to '@videoPlayer'
         url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
         # Change bckey (used by bcove.me urls) to playerKey
@@ -130,9 +134,10 @@ class BrightcoveIE(InfoExtractor):
 
         videoPlayer = query.get('@videoPlayer')
         if videoPlayer:
 
         videoPlayer = query.get('@videoPlayer')
         if videoPlayer:
-            return self._get_video_info(videoPlayer[0], query_str, query,
-                # We set the original url as the default 'Referer' header
-                referer=url)
+            # We set the original url as the default 'Referer' header
+            referer = smuggled_data.get('Referer', url)
+            return self._get_video_info(
+                videoPlayer[0], query_str, query, referer=referer)
         else:
             player_key = query['playerKey']
             return self._get_playlist_info(player_key[0])
         else:
             player_key = query['playerKey']
             return self._get_playlist_info(player_key[0])
@@ -156,11 +161,11 @@ class BrightcoveIE(InfoExtractor):
 
     def _get_playlist_info(self, player_key):
         playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
 
     def _get_playlist_info(self, player_key):
         playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
-                                               player_key, u'Downloading playlist information')
+                                               player_key, 'Downloading playlist information')
 
         json_data = json.loads(playlist_info)
         if 'videoList' not in json_data:
 
         json_data = json.loads(playlist_info)
         if 'videoList' not in json_data:
-            raise ExtractorError(u'Empty playlist')
+            raise ExtractorError('Empty playlist')
         playlist_info = json_data['videoList']
         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
 
         playlist_info = json_data['videoList']
         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
 
@@ -189,5 +194,5 @@ class BrightcoveIE(InfoExtractor):
                 'url': video_info['FLVFullLengthURL'],
             })
         else:
                 'url': video_info['FLVFullLengthURL'],
             })
         else:
-            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
+            raise ExtractorError('Unable to extract video url for %s' % info['id'])
         return info
         return info
index dc3a8d47d164912590d88d42c0fc072a99faeee4..690bc7c25fe2574faa473b122e8427137599c3cd 100644 (file)
@@ -1,21 +1,21 @@
 # coding: utf-8
 # coding: utf-8
+from __future__ import unicode_literals
 
 import re
 import json
 
 from .common import InfoExtractor
 
 import re
 import json
 
 from .common import InfoExtractor
-from ..utils import determine_ext
+
 
 class C56IE(InfoExtractor):
     _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
 
 class C56IE(InfoExtractor):
     _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
-    IE_NAME = u'56.com'
-
-    _TEST ={
-        u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html',
-        u'file': u'93440716.flv',
-        u'md5': u'e59995ac63d0457783ea05f93f12a866',
-        u'info_dict': {
-            u'title': u'网事知多少 第32期:车怒',
+    IE_NAME = '56.com'
+    _TEST = {
+        'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
+        'file': '93440716.flv',
+        'md5': 'e59995ac63d0457783ea05f93f12a866',
+        'info_dict': {
+            'title': '网事知多少 第32期:车怒',
         },
     }
 
         },
     }
 
@@ -23,14 +23,18 @@ class C56IE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
         text_id = mobj.group('textid')
         info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
         mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
         text_id = mobj.group('textid')
         info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
-                                           text_id, u'Downloading video info')
+                                           text_id, 'Downloading video info')
         info = json.loads(info_page)['info']
         info = json.loads(info_page)['info']
-        best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1]
-        video_url = best_format['url']
+        formats = [{
+            'format_id': f['type'],
+            'filesize': int(f['filesize']),
+            'url': f['url']
+        } for f in info['rfiles']]
+        self._sort_formats(formats)
 
 
-        return {'id': info['vid'],
-                'title': info['Subject'],
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': info.get('bimg') or info.get('img'),
-                }
+        return {
+            'id': info['vid'],
+            'title': info['Subject'],
+            'formats': formats,
+            'thumbnail': info.get('bimg') or info.get('img'),
+        }
index ae70ea22967a8d880ba15fa1bb64f32904139094..574881b70de67b9521b5e813f0cafa6da59d1068 100644 (file)
@@ -76,14 +76,18 @@ class Channel9IE(InfoExtractor):
             </div>)?                                                # File size part may be missing
         '''
         # Extract known formats
             </div>)?                                                # File size part may be missing
         '''
         # Extract known formats
-        formats = [{'url': x.group('url'),
-                 'format_id': x.group('quality'),
-                 'format_note': x.group('note'),
-                 'format': '%s (%s)' % (x.group('quality'), x.group('note')), 
-                 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
-                 } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
-        # Sort according to known formats list
-        formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id']))
+        formats = [{
+            'url': x.group('url'),
+            'format_id': x.group('quality'),
+            'format_note': x.group('note'),
+            'format': u'%s (%s)' % (x.group('quality'), x.group('note')),
+            'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
+            'preference': self._known_formats.index(x.group('quality')),
+            'vcodec': 'none' if x.group('note') == 'Audio only' else None,
+        } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
+
+        self._sort_formats(formats)
+
         return formats
 
     def _extract_title(self, html):
         return formats
 
     def _extract_title(self, html):
diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py
new file mode 100644 (file)
index 0000000..88e0e9a
--- /dev/null
@@ -0,0 +1,19 @@
+from .mtv import MTVIE
+
+class CMTIE(MTVIE):
+    IE_NAME = u'cmt.com'
+    _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
+    _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'
+
+    _TESTS = [
+        {
+            u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
+            u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2',
+            u'info_dict': {
+                u'id': u'989124',
+                u'ext': u'mp4',
+                u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
+                u'description': u'Blame It All On My Roots',
+            },
+        },
+    ]
index a034bb2fb6288fc62d964021405aa94eff532ff6..c9e7cc561e4ce55183c8ab50d9946b0aab9d9b12 100644 (file)
@@ -1,7 +1,12 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
-from ..utils import determine_ext
+from ..utils import (
+    int_or_none,
+    parse_duration,
+)
 
 
 class CNNIE(InfoExtractor):
 
 
 class CNNIE(InfoExtractor):
@@ -9,12 +14,14 @@ class CNNIE(InfoExtractor):
         (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
 
     _TESTS = [{
         (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
 
     _TESTS = [{
-        u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
-        u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
-        u'md5': u'3e6121ea48df7e2259fe73a0628605c4',
-        u'info_dict': {
-            u'title': u'Nadal wins 8th French Open title',
-            u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
+        'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
+        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
+        'md5': '3e6121ea48df7e2259fe73a0628605c4',
+        'info_dict': {
+            'title': 'Nadal wins 8th French Open title',
+            'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
+            'duration': 135,
+            'upload_date': '20130609',
         },
     },
     {
         },
     },
     {
@@ -31,26 +38,62 @@ class CNNIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         path = mobj.group('path')
         page_title = mobj.group('title')
         mobj = re.match(self._VALID_URL, url)
         path = mobj.group('path')
         page_title = mobj.group('title')
-        info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path
+        info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path
         info = self._download_xml(info_url, page_title)
 
         formats = []
         info = self._download_xml(info_url, page_title)
 
         formats = []
+        rex = re.compile(r'''(?x)
+            (?P<width>[0-9]+)x(?P<height>[0-9]+)
+            (?:_(?P<bitrate>[0-9]+)k)?
+        ''')
         for f in info.findall('files/file'):
         for f in info.findall('files/file'):
-            mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?',f.attrib['bitrate'])
-            if mf is not None:
-                formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text))
-        formats = sorted(formats)
-        (_,_,_, video_path) = formats[-1]
-        video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path
+            video_url = 'http://ht.cdn.turner.com/cnn/big%s' % (f.text.strip())
+            fdct = {
+                'format_id': f.attrib['bitrate'],
+                'url': video_url,
+            }
+
+            mf = rex.match(f.attrib['bitrate'])
+            if mf:
+                fdct['width'] = int(mf.group('width'))
+                fdct['height'] = int(mf.group('height'))
+                fdct['tbr'] = int_or_none(mf.group('bitrate'))
+            else:
+                mf = rex.search(f.text)
+                if mf:
+                    fdct['width'] = int(mf.group('width'))
+                    fdct['height'] = int(mf.group('height'))
+                    fdct['tbr'] = int_or_none(mf.group('bitrate'))
+                else:
+                    mi = re.match(r'ios_(audio|[0-9]+)$', f.attrib['bitrate'])
+                    if mi:
+                        if mi.group(1) == 'audio':
+                            fdct['vcodec'] = 'none'
+                            fdct['ext'] = 'm4a'
+                        else:
+                            fdct['tbr'] = int(mi.group(1))
+
+            formats.append(fdct)
+
+        self._sort_formats(formats)
 
         thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
         thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
 
 
         thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
         thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
 
-        return {'id': info.attrib['id'],
-                'title': info.find('headline').text,
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'thumbnail': thumbnails[-1][1],
-                'thumbnails': thumbs_dict,
-                'description': info.find('description').text,
-                }
+        metas_el = info.find('metas')
+        upload_date = (
+            metas_el.attrib.get('version') if metas_el is not None else None)
+
+        duration_el = info.find('length')
+        duration = parse_duration(duration_el.text)
+
+        return {
+            'id': info.attrib['id'],
+            'title': info.find('headline').text,
+            'formats': formats,
+            'thumbnail': thumbnails[-1][1],
+            'thumbnails': thumbs_dict,
+            'description': info.find('description').text,
+            'duration': duration,
+            'upload_date': upload_date,
+        }
index b27c1dfc52401f3c148d48d2b2897d2b06db3834..d10b7bd0cda2161838e83da741fe7a2f6c88f575 100644 (file)
@@ -1,82 +1,68 @@
+from __future__ import unicode_literals
+
+import json
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    compat_urllib_parse_urlparse,
-    determine_ext,
-
-    ExtractorError,
-)
 
 
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
     _TESTS = [{
 
 
 class CollegeHumorIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
 
     _TESTS = [{
-        u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
-        u'file': u'6902724.mp4',
-        u'md5': u'1264c12ad95dca142a9f0bf7968105a0',
-        u'info_dict': {
-            u'title': u'Comic-Con Cosplay Catastrophe',
-            u'description': u'Fans get creative this year at San Diego.  Too creative.  And yes, that\'s really Joss Whedon.',
+        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
+        'file': '6902724.mp4',
+        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
+        'info_dict': {
+            'title': 'Comic-Con Cosplay Catastrophe',
+            'description': 'Fans get creative this year at San Diego.  Too',
+            'age_limit': 13,
         },
     },
     {
         },
     },
     {
-        u'url': u'http://www.collegehumor.com/video/3505939/font-conference',
-        u'file': u'3505939.mp4',
-        u'md5': u'c51ca16b82bb456a4397987791a835f5',
-        u'info_dict': {
-            u'title': u'Font Conference',
-            u'description': u'This video wasn\'t long enough, so we made it double-spaced.',
+        'url': 'http://www.collegehumor.com/video/3505939/font-conference',
+        'file': '3505939.mp4',
+        'md5': '72fa701d8ef38664a4dbb9e2ab721816',
+        'info_dict': {
+            'title': 'Font Conference',
+            'description': 'This video wasn\'t long enough, so we made it double-spaced.',
+            'age_limit': 10,
         },
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         },
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
         video_id = mobj.group('videoid')
 
         video_id = mobj.group('videoid')
 
-        info = {
-            'id': video_id,
-            'uploader': None,
-            'upload_date': None,
-        }
-
-        self.report_extraction(video_id)
-        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
-        mdoc = self._download_xml(xmlUrl, video_id,
-                                         u'Downloading info XML',
-                                         u'Unable to download video info XML')
+        jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json'
+        data = json.loads(self._download_webpage(
+            jsonUrl, video_id, 'Downloading info JSON'))
+        vdata = data['video']
 
 
-        try:
-            videoNode = mdoc.findall('./video')[0]
-            youtubeIdNode = videoNode.find('./youtubeID')
-            if youtubeIdNode is not None:
-                return self.url_result(youtubeIdNode.text, 'Youtube')
-            info['description'] = videoNode.findall('./description')[0].text
-            info['title'] = videoNode.findall('./caption')[0].text
-            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
-            next_url = videoNode.findall('./file')[0].text
-        except IndexError:
-            raise ExtractorError(u'Invalid metadata XML file')
-
-        if next_url.endswith(u'manifest.f4m'):
-            manifest_url = next_url + '?hdcore=2.10.3'
-            adoc = self._download_xml(manifest_url, video_id,
-                                         u'Downloading XML manifest',
-                                         u'Unable to download video info XML')
-
-            try:
-                video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
-            except IndexError:
-                raise ExtractorError(u'Invalid manifest file')
-            url_pr = compat_urllib_parse_urlparse(info['thumbnail'])
-            info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','')
-            info['ext'] = 'mp4'
+        AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0}
+        rating = vdata.get('rating')
+        if rating:
+            age_limit = AGE_LIMITS.get(rating.lower())
         else:
         else:
-            # Old-style direct links
-            info['url'] = next_url
-            info['ext'] = determine_ext(info['url'])
+            age_limit = None  # None = No idea
+
+        PREFS = {'high_quality': 2, 'low_quality': 0}
+        formats = []
+        for format_key in ('mp4', 'webm'):
+            for qname, qurl in vdata[format_key].items():
+                formats.append({
+                    'format_id': format_key + '_' + qname,
+                    'url': qurl,
+                    'format': format_key,
+                    'preference': PREFS.get(qname),
+                })
+        self._sort_formats(formats)
 
 
-        return info
+        return {
+            'id': video_id,
+            'title': vdata['title'],
+            'description': vdata.get('description'),
+            'thumbnail': vdata.get('thumbnail'),
+            'formats': formats,
+            'age_limit': age_limit,
+        }
index a54ce3ee7c44727a9e56b1ab8359bd099b48bb35..27bd8256e6bf6dfb8dc7ae7997a29c2162d863d2 100644 (file)
@@ -12,7 +12,9 @@ from ..utils import (
 
 
 class ComedyCentralIE(MTVServicesInfoExtractor):
 
 
 class ComedyCentralIE(MTVServicesInfoExtractor):
-    _VALID_URL = r'https?://(?:www.)?comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)'
+    _VALID_URL = r'''(?x)https?://(?:www.)?comedycentral.com/
+        (video-clips|episodes|cc-studios|video-collections)
+        /(?P<title>.*)'''
     _FEED_URL = u'http://comedycentral.com/feeds/mrss/'
 
     _TEST = {
     _FEED_URL = u'http://comedycentral.com/feeds/mrss/'
 
     _TEST = {
index ba46a7bc77d17ed4bcf4dcf7764b1d39f4799958..ce3d1690304c8a0b488c2f674c48bb4ef34c86b5 100644 (file)
@@ -1,4 +1,5 @@
 import base64
 import base64
+import json
 import os
 import re
 import socket
 import os
 import re
 import socket
@@ -9,6 +10,7 @@ import xml.etree.ElementTree
 from ..utils import (
     compat_http_client,
     compat_urllib_error,
 from ..utils import (
     compat_http_client,
     compat_urllib_error,
+    compat_urllib_parse_urlparse,
     compat_str,
 
     clean_html,
     compat_str,
 
     clean_html,
@@ -37,10 +39,12 @@ class InfoExtractor(object):
     id:             Video identifier.
     title:          Video title, unescaped.
 
     id:             Video identifier.
     title:          Video title, unescaped.
 
-    Additionally, it must contain either a formats entry or url and ext:
+    Additionally, it must contain either a formats entry or a url one:
 
 
-    formats:        A list of dictionaries for each format available, it must
-                    be ordered from worst to best quality. Potential fields:
+    formats:        A list of dictionaries for each format available, ordered
+                    from worst to best quality.
+
+                    Potential fields:
                     * url        Mandatory. The URL of the video file
                     * ext        Will be calculated from url if missing
                     * format     A human-readable description of the format
                     * url        Mandatory. The URL of the video file
                     * ext        Will be calculated from url if missing
                     * format     A human-readable description of the format
@@ -48,23 +52,36 @@ class InfoExtractor(object):
                                  Calculated from the format_id, width, height.
                                  and format_note fields if missing.
                     * format_id  A short description of the format
                                  Calculated from the format_id, width, height.
                                  and format_note fields if missing.
                     * format_id  A short description of the format
-                                 ("mp4_h264_opus" or "19")
+                                 ("mp4_h264_opus" or "19").
+                                Technically optional, but strongly recommended.
                     * format_note Additional info about the format
                                  ("3D" or "DASH video")
                     * width      Width of the video, if known
                     * height     Height of the video, if known
                     * format_note Additional info about the format
                                  ("3D" or "DASH video")
                     * width      Width of the video, if known
                     * height     Height of the video, if known
+                    * resolution Textual description of width and height
+                    * tbr        Average bitrate of audio and video in KBit/s
                     * abr        Average audio bitrate in KBit/s
                     * acodec     Name of the audio codec in use
                     * vbr        Average video bitrate in KBit/s
                     * vcodec     Name of the video codec in use
                     * filesize   The number of bytes, if known in advance
                     * player_url SWF Player URL (used for rtmpdump).
                     * abr        Average audio bitrate in KBit/s
                     * acodec     Name of the audio codec in use
                     * vbr        Average video bitrate in KBit/s
                     * vcodec     Name of the video codec in use
                     * filesize   The number of bytes, if known in advance
                     * player_url SWF Player URL (used for rtmpdump).
+                    * protocol   The protocol that will be used for the actual
+                                 download, lower-case.
+                                 "http", "https", "rtsp", "rtmp" or so.
+                    * preference Order number of this format. If this field is
+                                 present and not None, the formats get sorted
+                                 by this field.
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
+                    * quality    Order number of the video quality of this
+                                 format, irrespective of the file format.
+                                 -1 for default (order by other properties),
+                                 -2 or smaller for less than default.
     url:            Final video URL.
     ext:            Video filename extension.
     format:         The video format, defaults to ext (used for --get-format)
     player_url:     SWF Player URL (used for rtmpdump).
     url:            Final video URL.
     ext:            Video filename extension.
     format:         The video format, defaults to ext (used for --get-format)
     player_url:     SWF Player URL (used for rtmpdump).
-    urlhandle:      [internal] The urlHandle to be used to download the file,
-                    like returned by urllib.request.urlopen
 
     The following fields are optional:
 
 
     The following fields are optional:
 
@@ -244,6 +261,20 @@ class InfoExtractor(object):
             xml_string = transform_source(xml_string)
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
 
             xml_string = transform_source(xml_string)
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
 
+    def _download_json(self, url_or_request, video_id,
+                       note=u'Downloading JSON metadata',
+                       errnote=u'Unable to download JSON metadata'):
+        json_string = self._download_webpage(url_or_request, video_id, note, errnote)
+        try:
+            return json.loads(json_string)
+        except ValueError as ve:
+            raise ExtractorError('Failed to download JSON', cause=ve)
+
+    def report_warning(self, msg, video_id=None):
+        idstr = u'' if video_id is None else u'%s: ' % video_id
+        self._downloader.report_warning(
+            u'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+
     def to_screen(self, msg):
         """Print msg to screen, prefixing it with '[ie_name]'"""
         self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
     def to_screen(self, msg):
         """Print msg to screen, prefixing it with '[ie_name]'"""
         self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
@@ -361,7 +392,7 @@ class InfoExtractor(object):
     @staticmethod
     def _og_regexes(prop):
         content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')'
     @staticmethod
     def _og_regexes(prop):
         content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')'
-        property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop)
+        property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
         template = r'<meta[^>]+?%s[^>]+?%s'
         return [
             template % (property_re, content_re),
         template = r'<meta[^>]+?%s[^>]+?%s'
         return [
             template % (property_re, content_re),
@@ -426,6 +457,58 @@ class InfoExtractor(object):
         }
         return RATING_TABLE.get(rating.lower(), None)
 
         }
         return RATING_TABLE.get(rating.lower(), None)
 
+    def _sort_formats(self, formats):
+        def _formats_key(f):
+            # TODO remove the following workaround
+            from ..utils import determine_ext
+            if not f.get('ext') and 'url' in f:
+                f['ext'] = determine_ext(f['url'])
+
+            preference = f.get('preference')
+            if preference is None:
+                proto = f.get('protocol')
+                if proto is None:
+                    proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
+
+                preference = 0 if proto in ['http', 'https'] else -0.1
+                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
+                    preference -= 0.5
+
+            if f.get('vcodec') == 'none':  # audio only
+                if self._downloader.params.get('prefer_free_formats'):
+                    ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus']
+                else:
+                    ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a']
+                ext_preference = 0
+                try:
+                    audio_ext_preference = ORDER.index(f['ext'])
+                except ValueError:
+                    audio_ext_preference = -1
+            else:
+                if self._downloader.params.get('prefer_free_formats'):
+                    ORDER = [u'flv', u'mp4', u'webm']
+                else:
+                    ORDER = [u'webm', u'flv', u'mp4']
+                try:
+                    ext_preference = ORDER.index(f['ext'])
+                except ValueError:
+                    ext_preference = -1
+                audio_ext_preference = 0
+
+            return (
+                preference,
+                f.get('quality') if f.get('quality') is not None else -1,
+                f.get('height') if f.get('height') is not None else -1,
+                f.get('width') if f.get('width') is not None else -1,
+                ext_preference,
+                f.get('tbr') if f.get('tbr') is not None else -1,
+                f.get('vbr') if f.get('vbr') is not None else -1,
+                f.get('abr') if f.get('abr') is not None else -1,
+                audio_ext_preference,
+                f.get('filesize') if f.get('filesize') is not None else -1,
+                f.get('format_id'),
+            )
+        formats.sort(key=_formats_key)
 
 
 class SearchInfoExtractor(InfoExtractor):
 
 
 class SearchInfoExtractor(InfoExtractor):
index f336a3c620a04e8bb643309b4812725e8f50e1d1..03b75b80d3b3019a75f89f2a773c4c4753afe2eb 100644 (file)
@@ -1,4 +1,5 @@
 # coding: utf-8
 # coding: utf-8
+from __future__ import unicode_literals
 
 import re
 import json
 
 import re
 import json
@@ -20,30 +21,31 @@ class CondeNastIE(InfoExtractor):
 
     # The keys are the supported sites and the values are the name to be shown
     # to the user and in the extractor description.
 
     # The keys are the supported sites and the values are the name to be shown
     # to the user and in the extractor description.
-    _SITES = {'wired': u'WIRED',
-              'gq': u'GQ',
-              'vogue': u'Vogue',
-              'glamour': u'Glamour',
-              'wmagazine': u'W Magazine',
-              'vanityfair': u'Vanity Fair',
-              }
+    _SITES = {
+        'wired': 'WIRED',
+        'gq': 'GQ',
+        'vogue': 'Vogue',
+        'glamour': 'Glamour',
+        'wmagazine': 'W Magazine',
+        'vanityfair': 'Vanity Fair',
+    }
 
     _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
 
     _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
-    IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
+    IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
 
     _TEST = {
 
     _TEST = {
-        u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
-        u'file': u'5171b343c2b4c00dd0c1ccb3.mp4',
-        u'md5': u'1921f713ed48aabd715691f774c451f7',
-        u'info_dict': {
-            u'title': u'3D Printed Speakers Lit With LED',
-            u'description': u'Check out these beautiful 3D printed LED speakers.  You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
+        'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
+        'file': '5171b343c2b4c00dd0c1ccb3.mp4',
+        'md5': '1921f713ed48aabd715691f774c451f7',
+        'info_dict': {
+            'title': '3D Printed Speakers Lit With LED',
+            'description': 'Check out these beautiful 3D printed LED speakers.  You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
         }
     }
 
     def _extract_series(self, url, webpage):
         title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
         }
     }
 
     def _extract_series(self, url, webpage):
         title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
-                                        webpage, u'series title', flags=re.DOTALL)
+                                        webpage, 'series title', flags=re.DOTALL)
         url_object = compat_urllib_parse_urlparse(url)
         base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
         m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
         url_object = compat_urllib_parse_urlparse(url)
         base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
         m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
@@ -57,39 +59,41 @@ class CondeNastIE(InfoExtractor):
         description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
                                                r'<div class="video-post-content">(.+?)</div>',
                                                ],
         description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
                                                r'<div class="video-post-content">(.+?)</div>',
                                                ],
-                                              webpage, u'description',
+                                              webpage, 'description',
                                               fatal=False, flags=re.DOTALL)
         params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
                                               fatal=False, flags=re.DOTALL)
         params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
-                                    u'player params', flags=re.DOTALL)
-        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id')
-        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id')
-        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target')
+                                    'player params', flags=re.DOTALL)
+        video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
+        player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
+        target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
         data = compat_urllib_parse.urlencode({'videoId': video_id,
                                               'playerId': player_id,
                                               'target': target,
                                               })
         base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
         data = compat_urllib_parse.urlencode({'videoId': video_id,
                                               'playerId': player_id,
                                               'target': target,
                                               })
         base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
-                                           webpage, u'base info url',
+                                           webpage, 'base info url',
                                            default='http://player.cnevids.com/player/loader.js?')
         info_url = base_info_url + data
         info_page = self._download_webpage(info_url, video_id,
                                            default='http://player.cnevids.com/player/loader.js?')
         info_url = base_info_url + data
         info_page = self._download_webpage(info_url, video_id,
-                                           u'Downloading video info')
-        video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info')
+                                           'Downloading video info')
+        video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
         video_info = json.loads(video_info)
 
         video_info = json.loads(video_info)
 
-        def _formats_sort_key(f):
-            type_ord = 1 if f['type'] == 'video/mp4' else 0
-            quality_ord = 1 if f['quality'] == 'high' else 0
-            return (quality_ord, type_ord)
-        best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1]
+        formats = [{
+            'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
+            'url': fdata['src'],
+            'ext': fdata['type'].split('/')[-1],
+            'quality': 1 if fdata['quality'] == 'high' else 0,
+        } for fdata in video_info['sources'][0]]
+        self._sort_formats(formats)
 
 
-        return {'id': video_id,
-                'url': best_format['src'],
-                'ext': best_format['type'].split('/')[-1],
-                'title': video_info['title'],
-                'thumbnail': video_info['poster_frame'],
-                'description': description,
-                }
+        return {
+            'id': video_id,
+            'formats': formats,
+            'title': video_info['title'],
+            'thumbnail': video_info['poster_frame'],
+            'description': description,
+        }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
index d5730684dc497b37d7ff57098f5a156ff620e40e..a2cbd4d8d592fd2dd6899361a3ab8f4bb3b8a8ed 100644 (file)
@@ -1,20 +1,25 @@
+from __future__ import unicode_literals
+
+import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
 import re
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_urllib_parse,
+    unescapeHTML,
 )
 
 )
 
+
 class CSpanIE(InfoExtractor):
     _VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)'
 class CSpanIE(InfoExtractor):
     _VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)'
+    IE_DESC = 'C-SPAN'
     _TEST = {
     _TEST = {
-        u'url': u'http://www.c-spanvideo.org/program/HolderonV',
-        u'file': u'315139.flv',
-        u'md5': u'74a623266956f69e4df0068ab6c80fe4',
-        u'info_dict': {
-            u"title": u"Attorney General Eric Holder on Voting Rights Act Decision"
+        'url': 'http://www.c-spanvideo.org/program/HolderonV',
+        'file': '315139.mp4',
+        'md5': '8e44ce11f0f725527daccc453f553eb0',
+        'info_dict': {
+            'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
+            'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in [Shelby County v. Holder] in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
         },
         },
-        u'skip': u'Requires rtmpdump'
     }
 
     def _real_extract(self, url):
     }
 
     def _real_extract(self, url):
@@ -22,30 +27,22 @@ class CSpanIE(InfoExtractor):
         prog_name = mobj.group(1)
         webpage = self._download_webpage(url, prog_name)
         video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
         prog_name = mobj.group(1)
         webpage = self._download_webpage(url, prog_name)
         video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
-        data = compat_urllib_parse.urlencode({'programid': video_id,
-                                              'dynamic':'1'})
-        info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data
-        video_info = self._download_webpage(info_url, video_id, u'Downloading video info')
-
-        self.report_extraction(video_id)
-
-        title = self._html_search_regex(r'<string name="title">(.*?)</string>',
-                                        video_info, 'title')
-        description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"',
-                                              webpage, 'description',
-                                              flags=re.MULTILINE|re.DOTALL)
-
-        url = self._search_regex(r'<string name="URL">(.*?)</string>',
-                                 video_info, 'video url')
-        url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443')
-        path = self._search_regex(r'<string name="path">(.*?)</string>',
-                            video_info, 'rtmp play path')
-
-        return {'id': video_id,
-                'title': title,
-                'ext': 'flv',
-                'url': url,
-                'play_path': path,
-                'description': description,
-                'thumbnail': self._og_search_thumbnail(webpage),
-                }
+
+        title = self._html_search_regex(
+            r'<!-- title -->\n\s*<h1[^>]*>(.*?)</h1>', webpage, 'title')
+        description = self._og_search_description(webpage)
+
+        info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
+        data_json = self._download_webpage(
+            info_url, video_id, 'Downloading video info')
+        data = json.loads(data_json)
+
+        url = unescapeHTML(data['video']['files'][0]['path']['#text'])
+
+        return {
+            'id': video_id,
+            'title': title,
+            'url': url,
+            'description': description,
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
index 424d960da95fc54ed7759e89a941f87b3da59801..c5529f8d455963e7f7ff6ad798f27493a0579410 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -5,15 +7,14 @@ from .common import InfoExtractor
 
 
 class DefenseGouvFrIE(InfoExtractor):
 
 
 class DefenseGouvFrIE(InfoExtractor):
-    _IE_NAME = 'defense.gouv.fr'
+    IE_NAME = 'defense.gouv.fr'
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
         r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
     _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
         r'ligthboxvideo/base-de-medias/webtv/(.*)')
 
     _TEST = {
-        u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/'
-        u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'),
-        u'file': u'11213.mp4',
-        u'md5': u'75bba6124da7e63d2d60b5244ec9430c',
+        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
+        'file': '11213.mp4',
+        'md5': '75bba6124da7e63d2d60b5244ec9430c',
         "info_dict": {
             "title": "attaque-chimique-syrienne-du-21-aout-2013-1"
         }
         "info_dict": {
             "title": "attaque-chimique-syrienne-du-21-aout-2013-1"
         }
index cb7226f82a6af167569286918a56cce64e796150..0b11d1f10e18e4358b35f76d0a0e0816b00eaa4c 100644 (file)
@@ -4,18 +4,17 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     unified_strdate,
 )
 
 
 class DreiSatIE(InfoExtractor):
     IE_NAME = '3sat'
     unified_strdate,
 )
 
 
 class DreiSatIE(InfoExtractor):
     IE_NAME = '3sat'
-    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/index\.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
+    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
     _TEST = {
         u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
     _TEST = {
         u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
-        u'file': u'36983.webm',
-        u'md5': u'57c97d0469d71cf874f6815aa2b7c944',
+        u'file': u'36983.mp4',
+        u'md5': u'9dcfe344732808dbfcc901537973c922',
         u'info_dict': {
             u"title": u"Kaffeeland Schweiz",
             u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...", 
         u'info_dict': {
             u"title": u"Kaffeeland Schweiz",
             u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...", 
@@ -52,18 +51,12 @@ class DreiSatIE(InfoExtractor):
             'width': int(fe.find('./width').text),
             'height': int(fe.find('./height').text),
             'url': fe.find('./url').text,
             'width': int(fe.find('./width').text),
             'height': int(fe.find('./height').text),
             'url': fe.find('./url').text,
-            'ext': determine_ext(fe.find('./url').text),
             'filesize': int(fe.find('./filesize').text),
             'video_bitrate': int(fe.find('./videoBitrate').text),
             'filesize': int(fe.find('./filesize').text),
             'video_bitrate': int(fe.find('./videoBitrate').text),
-            '3sat_qualityname': fe.find('./quality').text,
         } for fe in format_els
             if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
 
         } for fe in format_els
             if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
 
-        def _sortkey(format):
-            qidx = ['low', 'med', 'high', 'veryhigh'].index(format['3sat_qualityname'])
-            prefer_http = 1 if 'rtmp' in format['url'] else 0
-            return (qidx, prefer_http, format['video_bitrate'])
-        formats.sort(key=_sortkey)
+        self._sort_formats(formats)
 
         return {
             '_type': 'video',
 
         return {
             '_type': 'video',
diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py
new file mode 100644 (file)
index 0000000..12829cb
--- /dev/null
@@ -0,0 +1,69 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    ExtractorError,
+)
+
+
+class EveryonesMixtapeIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
+
+    _TEST = {
+        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
+        'file': '5bfseWNmlds.mp4',
+        "info_dict": {
+            "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
+            "uploader": "FKR.TV",
+            "uploader_id": "frenchkissrecords",
+            "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
+            "upload_date": "20081015"
+        },
+        'params': {
+            'skip_download': True,  # This is simply YouTube
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+
+        pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
+        pllist_req = compat_urllib_request.Request(pllist_url)
+        pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
+
+        playlist_list = self._download_json(
+            pllist_req, playlist_id, note='Downloading playlist metadata')
+        try:
+            playlist_no = next(playlist['id']
+                               for playlist in playlist_list
+                               if playlist['code'] == playlist_id)
+        except StopIteration:
+            raise ExtractorError('Playlist id not found')
+
+        pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
+        pl_req = compat_urllib_request.Request(pl_url)
+        pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
+        playlist = self._download_json(
+            pl_req, playlist_id, note='Downloading playlist info')
+
+        entries = [{
+            '_type': 'url',
+            'url': t['url'],
+            'title': t['title'],
+        } for t in playlist['tracks']]
+
+        if mobj.group('songnr'):
+            songnr = int(mobj.group('songnr')) - 1
+            return entries[songnr]
+
+        playlist_title = playlist['mixData']['name']
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': playlist_title,
+            'entries': entries,
+        }
index e1d2f0526a42169fbbba96789a413d3d20fec6a6..21ea5ec2bf1c499149809971b780daa9d10d0291 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
@@ -11,13 +13,13 @@ class FlickrIE(InfoExtractor):
     """Information Extractor for Flickr videos"""
     _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
     _TEST = {
     """Information Extractor for Flickr videos"""
     _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
     _TEST = {
-        u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
-        u'file': u'5645318632.mp4',
-        u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b',
-        u'info_dict': {
-            u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", 
-            u"uploader_id": u"forestwander-nature-pictures", 
-            u"title": u"Dark Hollow Waterfalls"
+        'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
+        'file': '5645318632.mp4',
+        'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
+        'info_dict': {
+            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", 
+            "uploader_id": "forestwander-nature-pictures", 
+            "title": "Dark Hollow Waterfalls"
         }
     }
 
         }
     }
 
@@ -29,13 +31,13 @@ class FlickrIE(InfoExtractor):
         webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
         webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
         webpage = self._download_webpage(webpage_url, video_id)
 
-        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')
+        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, 'secret')
 
         first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
 
         first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
         first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
 
         node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, u'node_id')
+            first_xml, 'node_id')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
 
         second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
         second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
@@ -44,7 +46,7 @@ class FlickrIE(InfoExtractor):
 
         mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
         if mobj is None:
 
         mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
         if mobj is None:
-            raise ExtractorError(u'Unable to extract video url')
+            raise ExtractorError('Unable to extract video url')
         video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
 
         return [{
         video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
 
         return [{
diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
new file mode 100644 (file)
index 0000000..deb1b0b
--- /dev/null
@@ -0,0 +1,38 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class FranceInterIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
+    _TEST = {
+        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
+        'file': '793962.mp3',
+        'md5': '4764932e466e6f6c79c317d2e74f6884',
+        "info_dict": {
+            "title": "L’Histoire dans les jeux vidéo",
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
+        path = self._search_regex(
+            r'&urlAOD=(.*?)&startTime', webpage, 'video url')
+        video_url = 'http://www.franceinter.fr/' + path
+
+        return {
+            'id': video_id,
+            'formats': [{
+                'url': video_url,
+                'vcodec': 'none',
+            }],
+            'title': title,
+        }
index ad85bc16d7796cfcf42331a05bb0392e773f70c5..b32ff9f867a02147d8ddc145cba685f074e6155c 100644 (file)
@@ -191,3 +191,29 @@ class GenerationQuoiIE(InfoExtractor):
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
             ie='Dailymotion')
         info = json.loads(info_json)
         return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
             ie='Dailymotion')
+
+
+class CultureboxIE(FranceTVBaseInfoExtractor):
+    IE_NAME = u'culturebox.francetvinfo.fr'
+    _VALID_URL = r'https?://culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
+
+    _TEST = {
+        u'url': u'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
+        u'info_dict': {
+            u'id': u'EV_6785',
+            u'ext': u'mp4',
+            u'title': u'Einstein on the beach au Théâtre du Châtelet',
+            u'description': u'md5:9ce2888b1efefc617b5e58b3f6200eeb',
+        },
+        u'params': {
+            # m3u8 download
+            u'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, u'video id')
+        return self._extract_video(video_id)
index 26b7d2ae531f785bc3177af4029652c531d840da..380ebbe556355b8fc0f49e825fa15433d5180b0b 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -13,12 +15,12 @@ from ..utils import (
 class GameSpotIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
     _TEST = {
 class GameSpotIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
     _TEST = {
-        u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
-        u"file": u"gs-2300-6410818.mp4",
-        u"md5": u"b2a30deaa8654fcccd43713a6b6a4825",
-        u"info_dict": {
-            u"title": u"Arma 3 - Community Guide: SITREP I",
-            u'description': u'Check out this video where some of the basics of Arma 3 is explained.',
+        "url": "http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
+        "file": "gs-2300-6410818.mp4",
+        "md5": "b2a30deaa8654fcccd43713a6b6a4825",
+        "info_dict": {
+            "title": "Arma 3 - Community Guide: SITREP I",
+            'description': 'Check out this video where some of the basics of Arma 3 is explained.',
         }
     }
 
         }
     }
 
index 7a14c98f9b6ef9d550606c72c330d0730ec1233e..839530982e7db4704ece2a589420ab4ecfc31c66 100644 (file)
@@ -1,9 +1,12 @@
 # encoding: utf-8
 
 # encoding: utf-8
 
+from __future__ import unicode_literals
+
 import os
 import re
 
 from .common import InfoExtractor
 import os
 import re
 
 from .common import InfoExtractor
+from .youtube import YoutubeIE
 from ..utils import (
     compat_urllib_error,
     compat_urllib_parse,
 from ..utils import (
     compat_urllib_error,
     compat_urllib_parse,
@@ -22,78 +25,78 @@ from .ooyala import OoyalaIE
 
 
 class GenericIE(InfoExtractor):
 
 
 class GenericIE(InfoExtractor):
-    IE_DESC = u'Generic downloader that works on some sites'
+    IE_DESC = 'Generic downloader that works on some sites'
     _VALID_URL = r'.*'
     _VALID_URL = r'.*'
-    IE_NAME = u'generic'
+    IE_NAME = 'generic'
     _TESTS = [
         {
     _TESTS = [
         {
-            u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
-            u'file': u'13601338388002.mp4',
-            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',
-            u'info_dict': {
-                u"uploader": u"www.hodiho.fr",
-                u"title": u"R\u00e9gis plante sa Jeep"
+            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
+            'file': '13601338388002.mp4',
+            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
+            'info_dict': {
+                'uploader': 'www.hodiho.fr',
+                'title': 'R\u00e9gis plante sa Jeep',
             }
         },
         # embedded vimeo video
         {
             }
         },
         # embedded vimeo video
         {
-            u'add_ie': ['Vimeo'],
-            u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
-            u'file': u'22444065.mp4',
-            u'md5': u'2903896e23df39722c33f015af0666e2',
-            u'info_dict': {
-                u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
-                u"uploader_id": u"skillsmatter",
-                u"uploader": u"Skills Matter",
+            'add_ie': ['Vimeo'],
+            'url': 'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references',
+            'file': '22444065.mp4',
+            'md5': '2903896e23df39722c33f015af0666e2',
+            'info_dict': {
+                'title': 'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011',
+                'uploader_id': 'skillsmatter',
+                'uploader': 'Skills Matter',
             }
         },
         # bandcamp page with custom domain
         {
             }
         },
         # bandcamp page with custom domain
         {
-            u'add_ie': ['Bandcamp'],
-            u'url': u'http://bronyrock.com/track/the-pony-mash',
-            u'file': u'3235767654.mp3',
-            u'info_dict': {
-                u'title': u'The Pony Mash',
-                u'uploader': u'M_Pallante',
+            'add_ie': ['Bandcamp'],
+            'url': 'http://bronyrock.com/track/the-pony-mash',
+            'file': '3235767654.mp3',
+            'info_dict': {
+                'title': 'The Pony Mash',
+                'uploader': 'M_Pallante',
             },
             },
-            u'skip': u'There is a limit of 200 free downloads / month for the test song',
+            'skip': 'There is a limit of 200 free downloads / month for the test song',
         },
         # embedded brightcove video
         # it also tests brightcove videos that need to set the 'Referer' in the
         # http requests
         {
         },
         # embedded brightcove video
         # it also tests brightcove videos that need to set the 'Referer' in the
         # http requests
         {
-            u'add_ie': ['Brightcove'],
-            u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
-            u'info_dict': {
-                u'id': u'2765128793001',
-                u'ext': u'mp4',
-                u'title': u'Le cours de bourse : l’analyse technique',
-                u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9',
-                u'uploader': u'BFM BUSINESS',
+            'add_ie': ['Brightcove'],
+            'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
+            'info_dict': {
+                'id': '2765128793001',
+                'ext': 'mp4',
+                'title': 'Le cours de bourse : l’analyse technique',
+                'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
+                'uploader': 'BFM BUSINESS',
             },
             },
-            u'params': {
-                u'skip_download': True,
+            'params': {
+                'skip_download': True,
             },
         },
         # Direct link to a video
         {
             },
         },
         # Direct link to a video
         {
-            u'url': u'http://media.w3.org/2010/05/sintel/trailer.mp4',
-            u'file': u'trailer.mp4',
-            u'md5': u'67d406c2bcb6af27fa886f31aa934bbe',
-            u'info_dict': {
-                u'id': u'trailer',
-                u'title': u'trailer',
-                u'upload_date': u'20100513',
+            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
+            'file': 'trailer.mp4',
+            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
+            'info_dict': {
+                'id': 'trailer',
+                'title': 'trailer',
+                'upload_date': '20100513',
             }
         },
         # ooyala video
         {
             }
         },
         # ooyala video
         {
-            u'url': u'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
-            u'md5': u'5644c6ca5d5782c1d0d350dad9bd840c',
-            u'info_dict': {
-                u'id': u'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
-                u'ext': u'mp4',
-                u'title': u'2cc213299525360.mov', #that's what we get
+            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
+            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
+            'info_dict': {
+                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
+                'ext': 'mp4',
+                'title': '2cc213299525360.mov', #that's what we get
             },
         },
     ]
             },
         },
     ]
@@ -101,12 +104,12 @@ class GenericIE(InfoExtractor):
     def report_download_webpage(self, video_id):
         """Report webpage download."""
         if not self._downloader.params.get('test', False):
     def report_download_webpage(self, video_id):
         """Report webpage download."""
         if not self._downloader.params.get('test', False):
-            self._downloader.report_warning(u'Falling back on generic information extractor.')
+            self._downloader.report_warning('Falling back on generic information extractor.')
         super(GenericIE, self).report_download_webpage(video_id)
 
     def report_following_redirect(self, new_url):
         """Report information extraction."""
         super(GenericIE, self).report_download_webpage(video_id)
 
     def report_following_redirect(self, new_url):
         """Report information extraction."""
-        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
+        self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
 
     def _send_head(self, url):
         """Check if it is a redirect, like url shorteners, in case return the new url."""
 
     def _send_head(self, url):
         """Check if it is a redirect, like url shorteners, in case return the new url."""
@@ -152,7 +155,7 @@ class GenericIE(InfoExtractor):
 
         response = opener.open(HEADRequest(url))
         if response is None:
 
         response = opener.open(HEADRequest(url))
         if response is None:
-            raise ExtractorError(u'Invalid URL protocol')
+            raise ExtractorError('Invalid URL protocol')
         return response
 
     def _real_extract(self, url):
         return response
 
     def _real_extract(self, url):
@@ -162,6 +165,8 @@ class GenericIE(InfoExtractor):
             return self.url_result('http://' + url)
         video_id = os.path.splitext(url.split('/')[-1])[0]
 
             return self.url_result('http://' + url)
         video_id = os.path.splitext(url.split('/')[-1])[0]
 
+        self.to_screen('%s: Requesting header' % video_id)
+
         try:
             response = self._send_head(url)
 
         try:
             response = self._send_head(url)
 
@@ -184,7 +189,7 @@ class GenericIE(InfoExtractor):
                     'formats': [{
                         'format_id': m.group('format_id'),
                         'url': url,
                     'formats': [{
                         'format_id': m.group('format_id'),
                         'url': url,
-                        'vcodec': u'none' if m.group('type') == 'audio' else None
+                        'vcodec': 'none' if m.group('type') == 'audio' else None
                     }],
                     'upload_date': upload_date,
                 }
                     }],
                     'upload_date': upload_date,
                 }
@@ -198,7 +203,7 @@ class GenericIE(InfoExtractor):
         except ValueError:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
         except ValueError:
             # since this is the last-resort InfoExtractor, if
             # this error is thrown, it'll be thrown here
-            raise ExtractorError(u'Failed to download URL: %s' % url)
+            raise ExtractorError('Failed to download URL: %s' % url)
 
         self.report_extraction(video_id)
 
 
         self.report_extraction(video_id)
 
@@ -209,22 +214,23 @@ class GenericIE(InfoExtractor):
         #   Video Title - Tagline | Site Name
         # and so on and so forth; it's just not practical
         video_title = self._html_search_regex(
         #   Video Title - Tagline | Site Name
         # and so on and so forth; it's just not practical
         video_title = self._html_search_regex(
-            r'(?s)<title>(.*?)</title>', webpage, u'video title',
-            default=u'video')
+            r'(?s)<title>(.*?)</title>', webpage, 'video title',
+            default='video')
 
         # video uploader is domain name
         video_uploader = self._search_regex(
 
         # video uploader is domain name
         video_uploader = self._search_regex(
-            r'^(?:https?://)?([^/]*)/.*', url, u'video uploader')
+            r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
 
         # Look for BrightCove:
         bc_url = BrightcoveIE._extract_brightcove_url(webpage)
         if bc_url is not None:
 
         # Look for BrightCove:
         bc_url = BrightcoveIE._extract_brightcove_url(webpage)
         if bc_url is not None:
-            self.to_screen(u'Brightcove video detected.')
-            return self.url_result(bc_url, 'Brightcove')
+            self.to_screen('Brightcove video detected.')
+            surl = smuggle_url(bc_url, {'Referer': url})
+            return self.url_result(surl, 'Brightcove')
 
         # Look for embedded (iframe) Vimeo player
         mobj = re.search(
 
         # Look for embedded (iframe) Vimeo player
         mobj = re.search(
-            r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
+            r'<iframe[^>]+?src="((?:https?:)?//player.vimeo.com/video/.+?)"', webpage)
         if mobj:
             player_url = unescapeHTML(mobj.group(1))
             surl = smuggle_url(player_url, {'Referer': url})
         if mobj:
             player_url = unescapeHTML(mobj.group(1))
             surl = smuggle_url(player_url, {'Referer': url})
@@ -271,16 +277,12 @@ class GenericIE(InfoExtractor):
             }
 
         # Look for embedded blip.tv player
             }
 
         # Look for embedded blip.tv player
-        mobj = re.search(r'<meta\s[^>]*https?://api.blip.tv/\w+/redirect/\w+/(\d+)', webpage)
+        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
         if mobj:
-            return self.url_result('http://blip.tv/seo/-'+mobj.group(1), 'BlipTV')
-        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*https?://(?:\w+\.)?blip.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', webpage)
+            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
         if mobj:
         if mobj:
-            player_url = 'http://blip.tv/play/%s.x?p=1' % mobj.group(1)
-            player_page = self._download_webpage(player_url, mobj.group(1))
-            blip_video_id = self._search_regex(r'data-episode-id="(\d+)', player_page, u'blip_video_id', fatal=False)
-            if blip_video_id:
-                return self.url_result('http://blip.tv/seo/-'+blip_video_id, 'BlipTV')
+            return self.url_result(mobj.group(1), 'BlipTV')
 
         # Look for Bandcamp pages with custom domain
         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
 
         # Look for Bandcamp pages with custom domain
         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
@@ -301,18 +303,32 @@ class GenericIE(InfoExtractor):
             return OoyalaIE._build_url_result(mobj.group(1))
 
         # Look for Aparat videos
             return OoyalaIE._build_url_result(mobj.group(1))
 
         # Look for Aparat videos
-        mobj = re.search(r'<iframe src="(http://www.aparat.com/video/[^"]+)"', webpage)
+        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
         if mobj is not None:
             return self.url_result(mobj.group(1), 'Aparat')
 
         if mobj is not None:
             return self.url_result(mobj.group(1), 'Aparat')
 
+        # Look for MPORA videos
+        mobj = re.search(r'<iframe .*?src="(http://mpora\.com/videos/[^"]+)"', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group(1), 'Mpora')
+
+        # Look for embedded Novamov player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Novamov')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Look for gorilla-vid style embedding
+            mobj = re.search(r'(?s)jw_plugins.*?file:\s*["\'](.*?)["\']', webpage)
         if mobj is None:
             # Broaden the search a little bit
             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
         if mobj is None:
             # Broaden the search a little bit: JWPlayer JS loader
         if mobj is None:
             # Broaden the search a little bit
             mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
         if mobj is None:
             # Broaden the search a little bit: JWPlayer JS loader
-            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"]*)', webpage)
+            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
         if mobj is None:
             # Try to find twitter cards info
             mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
         if mobj is None:
             # Try to find twitter cards info
             mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
@@ -327,23 +343,27 @@ class GenericIE(InfoExtractor):
             # HTML5 video
             mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
         if mobj is None:
             # HTML5 video
             mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
         if mobj is None:
-            raise ExtractorError(u'Unsupported URL: %s' % url)
+            raise ExtractorError('Unsupported URL: %s' % url)
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
 
         # It's possible that one of the regexes
         # matched, but returned an empty group:
         if mobj.group(1) is None:
-            raise ExtractorError(u'Did not find a valid video URL at %s' % url)
+            raise ExtractorError('Did not find a valid video URL at %s' % url)
 
         video_url = mobj.group(1)
         video_url = compat_urlparse.urljoin(url, video_url)
         video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
 
 
         video_url = mobj.group(1)
         video_url = compat_urlparse.urljoin(url, video_url)
         video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
 
+        # Sometimes, jwplayer extraction will result in a YouTube URL
+        if YoutubeIE.suitable(video_url):
+            return self.url_result(video_url, 'Youtube')
+
         # here's a fun little line of code for you:
         video_id = os.path.splitext(video_id)[0]
 
         return {
         # here's a fun little line of code for you:
         video_id = os.path.splitext(video_id)[0]
 
         return {
-            'id':       video_id,
-            'url':      video_url,
+            'id': video_id,
+            'url': video_url,
             'uploader': video_uploader,
             'uploader': video_uploader,
-            'title':    video_title,
+            'title': video_title,
         }
         }
index e5332cce820ca239c915da402107a77143f0484b..f40769eac0361594ac64f6ad4ccba7d35281333a 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -9,18 +11,18 @@ from ..utils import (
 
 
 class ImdbIE(InfoExtractor):
 
 
 class ImdbIE(InfoExtractor):
-    IE_NAME = u'imdb'
-    IE_DESC = u'Internet Movie Database trailers'
+    IE_NAME = 'imdb'
+    IE_DESC = 'Internet Movie Database trailers'
     _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)'
 
     _TEST = {
     _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.imdb.com/video/imdb/vi2524815897',
-        u'md5': u'9f34fa777ade3a6e57a054fdbcb3a068',
-        u'info_dict': {
-            u'id': u'2524815897',
-            u'ext': u'mp4',
-            u'title': u'Ice Age: Continental Drift Trailer (No. 2) - IMDb',
-            u'description': u'md5:9061c2219254e5d14e03c25c98e96a81',
+        'url': 'http://www.imdb.com/video/imdb/vi2524815897',
+        'md5': '9f34fa777ade3a6e57a054fdbcb3a068',
+        'info_dict': {
+            'id': '2524815897',
+            'ext': 'mp4',
+            'title': 'Ice Age: Continental Drift Trailer (No. 2) - IMDb',
+            'description': 'md5:9061c2219254e5d14e03c25c98e96a81',
         }
     }
 
         }
     }
 
@@ -37,10 +39,10 @@ class ImdbIE(InfoExtractor):
             f_path = f_path.strip()
             format_page = self._download_webpage(
                 compat_urlparse.urljoin(url, f_path),
             f_path = f_path.strip()
             format_page = self._download_webpage(
                 compat_urlparse.urljoin(url, f_path),
-                u'Downloading info for %s format' % f_id)
+                'Downloading info for %s format' % f_id)
             json_data = self._search_regex(
                 r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
             json_data = self._search_regex(
                 r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
-                format_page, u'json data', flags=re.DOTALL)
+                format_page, 'json data', flags=re.DOTALL)
             info = json.loads(json_data)
             format_info = info['videoPlayerObject']['video']
             formats.append({
             info = json.loads(json_data)
             format_info = info['videoPlayerObject']['video']
             formats.append({
@@ -55,3 +57,33 @@ class ImdbIE(InfoExtractor):
             'description': descr,
             'thumbnail': format_info['slate'],
         }
             'description': descr,
             'thumbnail': format_info['slate'],
         }
+
+
class ImdbListIE(InfoExtractor):
    """Extract all trailer entries from an IMDb user list as a playlist."""
    IE_NAME = 'imdb:list'
    IE_DESC = 'Internet Movie Database lists'
    _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'

    def _real_extract(self, url):
        list_id = re.match(self._VALID_URL, url).group('id')

        # The RSS XML is sometimes malformed, so fetch it as a plain webpage
        # and pull the title out with a regex instead of parsing it as XML.
        rss = self._download_webpage('http://rss.imdb.com/list/%s' % list_id, list_id, 'Downloading list RSS')
        list_title = self._html_search_regex(r'<title>(.*?)</title>', rss, 'list title')

        # Export is independent of actual author_id, but returns 404 if no author_id is provided.
        # However, passing dummy author_id seems to be enough.
        csv_data = self._download_webpage('http://www.imdb.com/list/export?list_id=%s&author_id=ur00000000' % list_id,
                                          list_id, 'Downloading list CSV')

        entries = []
        # Skip the CSV header row; the second column carries the item id,
        # wrapped in quotes which [1:-1] strips off.
        for row in csv_data.split('\n')[1:]:
            fields = row.split(',')
            if len(fields) < 2:
                continue
            item_id = fields[1][1:-1]
            # Only ids starting with 'vi' are videos; title/person rows are ignored.
            if item_id.startswith('vi'):
                entries.append(self.url_result('http://www.imdb.com/video/imdb/%s' % item_id, 'Imdb'))

        return self.playlist_result(entries, list_id, list_title)
index 652f19b7b8ea689d7861b04f6ff421c144c300d9..ef9bca734af0187014ba39170d9a230d8d4c52a2 100644 (file)
@@ -7,7 +7,7 @@ class InaIE(InfoExtractor):
     """Information Extractor for Ina.fr"""
     _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I?[A-F0-9]+)/.*'
     _TEST = {
     """Information Extractor for Ina.fr"""
     _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I?[A-F0-9]+)/.*'
     _TEST = {
-        u'url': u'www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
+        u'url': u'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
         u'file': u'I12055569.mp4',
         u'md5': u'a667021bf2b41f8dc6049479d9bb38a3',
         u'info_dict': {
         u'file': u'I12055569.mp4',
         u'md5': u'a667021bf2b41f8dc6049479d9bb38a3',
         u'info_dict': {
index 16a6f73c87e27d47d401c444aa02b80d5a6313b3..4ddda2f1bb86dd534f623218b2acd74566d781e1 100644 (file)
@@ -5,7 +5,6 @@ from ..utils import (
     compat_urlparse,
     compat_urllib_parse,
     xpath_with_ns,
     compat_urlparse,
     compat_urllib_parse,
     xpath_with_ns,
-    determine_ext,
 )
 
 
 )
 
 
@@ -63,13 +62,17 @@ class InternetVideoArchiveIE(InfoExtractor):
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
             f_url = attr['url']
         for content in item.findall(_bp('media:group/media:content')):
             attr = content.attrib
             f_url = attr['url']
+            width = int(attr['width'])
+            bitrate = int(attr['bitrate'])
+            format_id = '%d-%dk' % (width, bitrate)
             formats.append({
             formats.append({
+                'format_id': format_id,
                 'url': f_url,
                 'url': f_url,
-                'ext': determine_ext(f_url),
-                'width': int(attr['width']),
-                'bitrate': int(attr['bitrate']),
+                'width': width,
+                'tbr': bitrate,
             })
             })
-        formats = sorted(formats, key=lambda f: f['bitrate'])
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index 4bdf55f934aa63b005ff06e9b088b956f6806a02..98d1d272a6bb5085dadae9c8233a0e2592571636 100644 (file)
@@ -84,14 +84,16 @@ class IviIE(InfoExtractor):
 
         result = video_json[u'result']
 
 
         result = video_json[u'result']
 
-        formats = [{'url': x[u'url'],
-                    'format_id': x[u'content_format']
-                    } for x in result[u'files'] if x[u'content_format'] in self._known_formats]
-        formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id']))
-
-        if len(formats) == 0:
-            self._downloader.report_warning(u'No media links available for %s' % video_id)
-            return
+        formats = [{
+            'url': x[u'url'],
+            'format_id': x[u'content_format'],
+            'preference': self._known_formats.index(x[u'content_format']),
+        } for x in result[u'files'] if x[u'content_format'] in self._known_formats]
+
+        self._sort_formats(formats)
+
+        if not formats:
+            raise ExtractorError(u'No media links available for %s' % video_id)
 
         duration = result[u'duration']
         compilation = result[u'compilation']
 
         duration = result[u'duration']
         compilation = result[u'compilation']
diff --git a/youtube_dl/extractor/jpopsukitv.py b/youtube_dl/extractor/jpopsukitv.py
new file mode 100644 (file)
index 0000000..aad7825
--- /dev/null
@@ -0,0 +1,73 @@
+# coding=utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    unified_strdate,
+)
+
+
class JpopsukiIE(InfoExtractor):
    """Extract music videos hosted on jpopsuki.tv."""
    IE_NAME = 'jpopsuki.tv'
    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'

    _TEST = {
        'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
        'md5': '88018c0c1a9b1387940e90ec9e7e198e',
        'file': '00be659d23b0b40508169cdee4545771.mp4',
        'info_dict': {
            'id': '00be659d23b0b40508169cdee4545771',
            'title': 'ayumi hamasaki - evolution',
            'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
            'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
            'uploader': 'plama_chan',
            'uploader_id': '404',
            'upload_date': '20121101'
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        webpage = self._download_webpage(url, video_id)

        # The <source> tag holds a site-relative path; prepend the host.
        video_url = 'http://www.jpopsuki.tv' + self._html_search_regex(
            r'<source src="(.*?)" type', webpage, 'video url')

        # All metadata fields are best-effort (fatal=False): the page layout
        # may omit any of them without aborting the extraction.
        upload_date = self._html_search_regex(
            r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
            fatal=False)
        view_count = self._html_search_regex(
            r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
            fatal=False)
        comment_count = self._html_search_regex(
            r'<h2>([0-9]+?) comments</h2>', webpage, 'video comment_count',
            fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader': self._html_search_regex(
                r'<li>from: <a href="/user/view/user/(.*?)/uid/',
                webpage, 'video uploader', fatal=False),
            'uploader_id': self._html_search_regex(
                r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
                webpage, 'video uploader_id', fatal=False),
            # unified_strdate normalizes the site's date string to YYYYMMDD.
            'upload_date': unified_strdate(upload_date) if upload_date is not None else None,
            'view_count': int_or_none(view_count),
            'comment_count': int_or_none(comment_count),
        }
index 50916f4a66c6227e1eb4dc531745c9d5a6ad85d5..23103b163fea1ed6a27cb44dadcf231b478edcdb 100644 (file)
@@ -1,21 +1,24 @@
+from __future__ import unicode_literals
+
 import re
 import hashlib
 
 from .common import InfoExtractor
 import re
 import hashlib
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 
 _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
+
 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
     
     _TEST = {
 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
     
     _TEST = {
-        u'url': u'http://yinyue.kankan.com/vod/48/48863.shtml',
-        u'file': u'48863.flv',
-        u'md5': u'29aca1e47ae68fc28804aca89f29507e',
-        u'info_dict': {
-            u'title': u'Ready To Go',
+        'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
+        'file': '48863.flv',
+        'md5': '29aca1e47ae68fc28804aca89f29507e',
+        'info_dict': {
+            'title': 'Ready To Go',
         },
         },
+        'skip': 'Only available from China',
     }
 
     def _real_extract(self, url):
     }
 
     def _real_extract(self, url):
@@ -23,22 +26,23 @@ class KankanIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
 
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
 
-        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, u'video title')
+        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
         surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
         gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
         gcid = gcids[-1]
 
         surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
         gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
         gcid = gcids[-1]
 
-        video_info_page = self._download_webpage('http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid,
-                                                 video_id, u'Downloading video url info')
-        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, u'video url ip')
-        path = self._search_regex(r'path:"(.+?)"', video_info_page, u'video url path')
-        param1 = self._search_regex(r'param1:(\d+)', video_info_page, u'param1')
-        param2 = self._search_regex(r'param2:(\d+)', video_info_page, u'param2')
+        info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
+        video_info_page = self._download_webpage(
+            info_url, video_id, 'Downloading video url info')
+        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
+        path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
+        param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
+        param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
         key = _md5('xl_mp43651' + param1 + param2)
         video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)
 
         key = _md5('xl_mp43651' + param1 + param2)
         video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)
 
-        return {'id': video_id,
-                'title': title,
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                }
+        return {
+            'id': video_id,
+            'title': title,
+            'url': video_url,
+        }
diff --git a/youtube_dl/extractor/khanacademy.py b/youtube_dl/extractor/khanacademy.py
new file mode 100644 (file)
index 0000000..772bb56
--- /dev/null
@@ -0,0 +1,71 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    unified_strdate,
+)
+
+
class KhanAcademyIE(InfoExtractor):
    """Extract Khan Academy videos, and topics as playlists of their children."""
    _VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
    IE_NAME = 'KhanAcademy'

    _TEST = {
        'url': 'http://www.khanacademy.org/video/one-time-pad',
        'file': 'one-time-pad.mp4',
        'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
        'info_dict': {
            'title': 'The one-time pad',
            'description': 'The perfect cipher',
            'duration': 176,
            'uploader': 'Brit Cruise',
            'upload_date': '20120411',
        }
    }

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        video_id = match.group('id')

        if match.group('key') != 'video':
            # Anything that is not a /video/ URL is treated as a topic page:
            # return a playlist of its Video/Topic children (other kinds are
            # filtered out).
            data = self._download_json(
                'http://api.khanacademy.org/api/v1/topic/' + video_id,
                video_id, 'Downloading topic info')

            entries = [{
                '_type': 'url',
                'url': child['url'],
                'id': child['id'],
                'title': child['title'],
            } for child in data['children'] if child['kind'] in ('Video', 'Topic')]

            return {
                '_type': 'playlist',
                'id': video_id,
                'title': data['title'],
                'description': data['description'],
                'entries': entries,
            }

        data = self._download_json(
            'http://api.khanacademy.org/api/v1/videos/' + video_id,
            video_id, 'Downloading video info')

        # url_transparent delegates the actual download to the extractor for
        # data['url'] while keeping the metadata gathered here.
        return {
            '_type': 'url_transparent',
            'url': data['url'],
            'id': video_id,
            'title': data['title'],
            'thumbnail': data['image_url'],
            'duration': data['duration'],
            'description': data['description'],
            'uploader': ', '.join(data['author_names']),
            'upload_date': unified_strdate(data['date_added']),
        }
diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py
new file mode 100644 (file)
index 0000000..6deed27
--- /dev/null
@@ -0,0 +1,201 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .subtitles import SubtitlesInfoExtractor
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+    ExtractorError
+)
+
+
class LyndaIE(SubtitlesInfoExtractor):
    """Extractor for individual lynda.com course videos.

    Performs an optional account login, fetches the player JSON for a video,
    builds the format list, and converts lynda's proprietary transcript JSON
    into SRT-style subtitles.
    """
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/[^/]+/[^/]+/\d+/(\d+)-\d\.html'
    _LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
    _NETRC_MACHINE = 'lynda'

    # Markup that only appears on pages rendered for a logged-in account;
    # used to verify that the login round-trip actually succeeded.
    _SUCCESSFUL_LOGIN_REGEX = r'<a href="https://www.lynda.com/home/userAccount/ChangeContactInfo.aspx" data-qa="eyebrow_account_menu">My account'
    # Transcript timecodes look like "[0:00:08.447]" (dot or comma fractions).
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'

    _TEST = {
        'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'file': '114408.mp4',
        'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
        'info_dict': {
            'title': 'Using the exercise files',
            'duration': 68
        }
    }

    def _real_initialize(self):
        # Log in (if credentials were supplied) before any extraction runs.
        self._login()

    def _real_extract(self, url):
        """Download the player JSON for a video and return the info dict."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
                                      video_id, 'Downloading video JSON')
        video_json = json.loads(page)

        # The API reports errors via a 'Status' key plus a human-readable 'Message'.
        if 'Status' in video_json:
            raise ExtractorError('lynda returned error: %s' % video_json['Message'], expected=True)

        if video_json['HasAccess'] is False:
            raise ExtractorError('Video %s is only available for members. ' % video_id + self.ACCOUNT_CREDENTIALS_HINT, expected=True)

        # Prefer the canonical id from the API over the one parsed from the URL.
        video_id = video_json['ID']
        duration = video_json['DurationInSeconds']
        title = video_json['Title']

        formats = [{'url': fmt['Url'],
                    'ext': fmt['Extension'],
                    'width': fmt['Width'],
                    'height': fmt['Height'],
                    'filesize': fmt['FileSize'],
                    'format_id': str(fmt['Resolution'])
                    } for fmt in video_json['Formats']]

        self._sort_formats(formats)

        # --list-subs: print available subtitles and stop.
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, page)
            return

        subtitles = self._fix_subtitles(self.extract_subtitles(video_id, page))

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats
        }

    def _login(self):
        """Log in to lynda.com using --username/--password (no-op without them).

        Raises ExtractorError on wrong credentials or any other login failure.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username,
            'password': password,
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        login_page = self._download_webpage(request, None, note='Logging in as %s' % username)

        # Not (yet) logged in
        m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
                raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)

            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form))
                login_page = self._download_webpage(request, None, note='Confirming log in and log out from another device')

        # Final sanity check: the logged-in marker must be present on the page.
        if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
            raise ExtractorError('Unable to log in')

    def _fix_subtitles(self, subtitles):
        """Convert lynda transcript JSON ({lang: json_string}) to SRT text.

        Each JSON entry carries a 'Timecode' and a 'Caption'; a cue's end time
        is taken from the *next* entry's timecode. Consequently the final
        entry only provides the end timestamp of the cue before it and gets
        no cue of its own.
        """
        if subtitles is None:
            return subtitles  # subtitles not requested

        fixed_subtitles = {}
        for k, v in subtitles.items():
            subs = json.loads(v)
            if len(subs) == 0:
                continue
            srt = ''
            for pos in range(0, len(subs) - 1):
                seq_current = subs[pos]
                m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
                if m_current is None:
                    continue
                seq_next = subs[pos + 1]
                m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
                if m_next is None:
                    continue
                appear_time = m_current.group('timecode')
                disappear_time = m_next.group('timecode')
                text = seq_current['Caption']
                # NOTE(review): cues are concatenated without the blank-line
                # separator that strict SRT expects — confirm players accept this.
                srt += '%s\r\n%s --> %s\r\n%s' % (str(pos), appear_time, disappear_time, text)
            if srt:
                fixed_subtitles[k] = srt
        return fixed_subtitles

    def _get_available_subtitles(self, video_id, webpage):
        """Return {'en': transcript_url} when a non-empty transcript exists."""
        url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        sub = self._download_webpage(url, None, note=False)
        sub_json = json.loads(sub)
        return {'en': url} if len(sub_json) > 0 else {}
+
+
class LyndaCourseIE(InfoExtractor):
    """Extract every accessible video of a lynda.com course as a playlist."""
    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

    # Course link equals to welcome/introduction video link of same course
    # We will recognize it as course link
    _VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        course_json = json.loads(self._download_webpage(
            'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
            course_id, 'Downloading course JSON'))

        if 'Status' in course_json and course_json['Status'] == 'NotFound':
            raise ExtractorError('Course %s does not exist' % course_id, expected=True)

        username, _ = self._get_login_info()
        locked_count = 0
        video_ids = []

        # Walk every chapter; without credentials, member-only videos are
        # skipped but counted so we can warn once below.
        for chapter in course_json['Chapters']:
            for video in chapter['Videos']:
                if username is None and video['HasAccess'] is False:
                    locked_count += 1
                    continue
                video_ids.append(video['ID'])

        if locked_count > 0:
            self._downloader.report_warning('%s videos are only available for members and will not be downloaded. '
                                            % locked_count + LyndaIE.ACCOUNT_CREDENTIALS_HINT)

        # The "-4" suffix selects the plain video page form that LyndaIE matches.
        entries = [
            self.url_result(
                'http://www.lynda.com/%s/%s-4.html' % (course_path, vid), 'Lynda')
            for vid in video_ids]

        return self.playlist_result(entries, course_id, course_json['Title'])
\ No newline at end of file
diff --git a/youtube_dl/extractor/macgamestore.py b/youtube_dl/extractor/macgamestore.py
new file mode 100644 (file)
index 0000000..b818cf5
--- /dev/null
@@ -0,0 +1,43 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
class MacGameStoreIE(InfoExtractor):
    """Extract game trailers hosted on macgamestore.com."""
    IE_NAME = 'macgamestore'
    IE_DESC = 'MacGameStore trailers'
    _VALID_URL = r'https?://www\.macgamestore\.com/mediaviewer\.php\?trailer=(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.macgamestore.com/mediaviewer.php?trailer=2450',
        'file': '2450.m4v',
        'md5': '8649b8ea684b6666b4c5be736ecddc61',
        'info_dict': {
            'title': 'Crow',
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        webpage = self._download_webpage(url, video_id, 'Downloading trailer page')

        # Removed trailers are served as a placeholder page containing this marker.
        if re.search(r'>Missing Media<', webpage) is not None:
            raise ExtractorError('Trailer %s does not exist' % video_id, expected=True)

        title = self._html_search_regex(
            r'<title>MacGameStore: (.*?) Trailer</title>', webpage, 'title')

        video_url = self._html_search_regex(
            r'(?s)<div\s+id="video-player".*?href="([^"]+)"\s*>',
            webpage, 'video URL')

        return {
            'id': video_id,
            'url': video_url,
            'title': title
        }
index 08ce0647f93e69ab34187ef08926d89a9de458b4..7aa0080d735fe811d6babf110156f4ab895edbdd 100644 (file)
@@ -52,10 +52,11 @@ class MDRIE(InfoExtractor):
                     'format_id': u'%s-%d' % (media_type, vbr),
                 })
             formats.append(format)
                     'format_id': u'%s-%d' % (media_type, vbr),
                 })
             formats.append(format)
-        formats.sort(key=lambda f: (f.get('vbr'), f['abr']))
         if not formats:
             raise ExtractorError(u'Could not find any valid formats')
 
         if not formats:
             raise ExtractorError(u'Could not find any valid formats')
 
+        self._sort_formats(formats)
+
         return {
             'id': video_id,
             'title': title,
         return {
             'id': video_id,
             'title': title,
index e560c1d354d8b03a05133bf1458ce8d28b84b7bc..f3ff0e8bb47ac3307d52eeb10ad1b5449c6fb8e3 100644 (file)
@@ -1,5 +1,6 @@
+from __future__ import unicode_literals
+
 import re
 import re
-import operator
 
 from .common import InfoExtractor
 from ..utils import (
 
 from .common import InfoExtractor
 from ..utils import (
@@ -11,12 +12,12 @@ class MetacriticIE(InfoExtractor):
     _VALID_URL = r'https?://www\.metacritic\.com/.+?/trailers/(?P<id>\d+)'
 
     _TEST = {
     _VALID_URL = r'https?://www\.metacritic\.com/.+?/trailers/(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222',
-        u'file': u'3698222.mp4',
-        u'info_dict': {
-            u'title': u'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors',
-            u'description': u'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.',
-            u'duration': 221,
+        'url': 'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222',
+        'file': '3698222.mp4',
+        'info_dict': {
+            'title': 'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors',
+            'description': 'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.',
+            'duration': 221,
         },
     }
 
         },
     }
 
@@ -26,7 +27,7 @@ class MetacriticIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         # The xml is not well formatted, there are raw '&'
         info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
         webpage = self._download_webpage(url, video_id)
         # The xml is not well formatted, there are raw '&'
         info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
-            video_id, u'Downloading info xml', transform_source=fix_xml_all_ampersand)
+            video_id, 'Downloading info xml', transform_source=fix_xml_all_ampersand)
 
         clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
         formats = []
 
         clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
         formats = []
@@ -37,12 +38,12 @@ class MetacriticIE(InfoExtractor):
                 'url': video_url,
                 'ext': 'mp4',
                 'format_id': rate_str,
                 'url': video_url,
                 'ext': 'mp4',
                 'format_id': rate_str,
-                'rate': int(rate_str),
+                'tbr': int(rate_str),
             })
             })
-        formats.sort(key=operator.itemgetter('rate'))
+        self._sort_formats(formats)
 
         description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
 
         description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
-            webpage, u'description', flags=re.DOTALL)
+            webpage, 'description', flags=re.DOTALL)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index 52be9232fe12d394ed600c349721339c5891a78a..76b717fe5dbac08b8b103a1e44192a6fbf6d2a55 100644 (file)
@@ -33,8 +33,18 @@ class TechTVMITIE(InfoExtractor):
             raw_page, u'base url')
         formats_json = self._search_regex(r'bitrates: (\[.+?\])', raw_page,
             u'video formats')
             raw_page, u'base url')
         formats_json = self._search_regex(r'bitrates: (\[.+?\])', raw_page,
             u'video formats')
-        formats = json.loads(formats_json)
-        formats = sorted(formats, key=lambda f: f['bitrate'])
+        formats_mit = json.loads(formats_json)
+        formats = [
+            {
+                'format_id': f['label'],
+                'url': base_url + f['url'].partition(':')[2],
+                'ext': f['url'].partition(':')[0],
+                'format': f['label'],
+                'width': f['width'],
+                'vbr': f['bitrate'],
+            }
+            for f in formats_mit
+        ]
 
         title = get_element_by_id('edit-title', clean_page)
         description = clean_html(get_element_by_id('edit-description', clean_page))
 
         title = get_element_by_id('edit-title', clean_page)
         description = clean_html(get_element_by_id('edit-description', clean_page))
@@ -43,8 +53,7 @@ class TechTVMITIE(InfoExtractor):
 
         return {'id': video_id,
                 'title': title,
 
         return {'id': video_id,
                 'title': title,
-                'url': base_url + formats[-1]['url'].replace('mp4:', ''),
-                'ext': 'mp4',
+                'formats': formats,
                 'description': description,
                 'thumbnail': thumbnail,
                 }
                 'description': description,
                 'thumbnail': thumbnail,
                 }
index 125d81551c26ea67eff82f2d2189bd058d16b873..f3356db50ebf8941ac58e9a229778ba864c57be0 100644 (file)
@@ -1,4 +1,5 @@
-import json
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
@@ -10,17 +11,17 @@ from ..utils import (
 
 class MixcloudIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
 
 class MixcloudIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
-    IE_NAME = u'mixcloud'
+    IE_NAME = 'mixcloud'
 
     _TEST = {
 
     _TEST = {
-        u'url': u'http://www.mixcloud.com/dholbach/cryptkeeper/',
-        u'file': u'dholbach-cryptkeeper.mp3',
-        u'info_dict': {
-            u'title': u'Cryptkeeper',
-            u'description': u'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
-            u'uploader': u'Daniel Holbach',
-            u'uploader_id': u'dholbach',
-            u'upload_date': u'20111115',
+        'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/',
+        'file': 'dholbach-cryptkeeper.mp3',
+        'info_dict': {
+            'title': 'Cryptkeeper',
+            'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
+            'uploader': 'Daniel Holbach',
+            'uploader_id': 'dholbach',
+            'upload_date': '20111115',
         },
     }
 
         },
     }
 
@@ -42,18 +43,19 @@ class MixcloudIE(InfoExtractor):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-
         uploader = mobj.group(1)
         cloudcast_name = mobj.group(2)
         track_id = '-'.join((uploader, cloudcast_name))
         uploader = mobj.group(1)
         cloudcast_name = mobj.group(2)
         track_id = '-'.join((uploader, cloudcast_name))
-        api_url = 'http://api.mixcloud.com/%s/%s/' % (uploader, cloudcast_name)
+
         webpage = self._download_webpage(url, track_id)
         webpage = self._download_webpage(url, track_id)
-        json_data = self._download_webpage(api_url, track_id,
-            u'Downloading cloudcast info')
-        info = json.loads(json_data)
 
 
-        preview_url = self._search_regex(r'data-preview-url="(.+?)"', webpage, u'preview url')
-        song_url = preview_url.replace('/previews/', '/cloudcasts/originals/')
+        api_url = 'http://api.mixcloud.com/%s/%s/' % (uploader, cloudcast_name)
+        info = self._download_json(
+            api_url, track_id, 'Downloading cloudcast info')
+
+        preview_url = self._search_regex(
+            r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url')
+        song_url = preview_url.replace('/previews/', '/c/originals/')
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
         final_song_url = self._get_url(template_url)
         if final_song_url is None:
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
         final_song_url = self._get_url(template_url)
         if final_song_url is None:
diff --git a/youtube_dl/extractor/mpora.py b/youtube_dl/extractor/mpora.py
new file mode 100644 (file)
index 0000000..6a8e2cc
--- /dev/null
@@ -0,0 +1,66 @@
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+)
+
+
+class MporaIE(InfoExtractor):
+    _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
+    IE_NAME = 'MPORA'
+
+    _TEST = {
+        'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de',
+        'file': 'AAdo8okx4wiz.mp4',
+        'md5': 'a7a228473eedd3be741397cf452932eb',
+        'info_dict': {
+            'title': 'Katy Curd -  Winter in the Forest',
+            'duration': 416,
+            'uploader': 'petenewman',
+        },
+    }
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        data_json = self._search_regex(
+            r"new FM\.Player\('[^']+',\s*(\{.*?)\);\n", webpage, 'json')
+
+        data = json.loads(data_json)
+
+        uploader = data['info_overlay'].get('username')
+        duration = data['video']['duration'] // 1000
+        thumbnail = data['video']['encodings']['sd']['poster']
+        title = data['info_overlay']['title']
+
+        formats = []
+        for encoding_id, edata in data['video']['encodings'].items():
+            for src in edata['sources']:
+                width_str = self._search_regex(
+                    r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
+                    False, default=None)
+                vcodec = src['type'].partition('/')[2]
+                
+                formats.append({
+                    'format_id': encoding_id + '-' + vcodec,
+                    'url': src['src'],
+                    'vcodec': vcodec,
+                    'width': int_or_none(width_str),
+                })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'uploader': uploader,
+            'duration': duration,
+            'thumbnail': thumbnail,
+        }
index ed11f521aa02aa3fe421b8fc743b0a26b1e1cdd0..f1cf41e2dbf2012764fdb0f2e1745c07ecdef055 100644 (file)
@@ -129,7 +129,7 @@ class MTVIE(MTVServicesInfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('videoid')
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('videoid')
-        uri = mobj.group('mgid')
+        uri = mobj.groupdict().get('mgid')
         if uri is None:
             webpage = self._download_webpage(url, video_id)
     
         if uri is None:
             webpage = self._download_webpage(url, video_id)
     
index 0404e6e43f381c86f8bb91633ca5524564009957..6d35c7861f38f844385454fb7e7d9e8d5ff029a9 100644 (file)
@@ -143,8 +143,10 @@ class MyVideoIE(InfoExtractor):
         if mobj:
             video_url = compat_urllib_parse.unquote(mobj.group(1))
             if 'myvideo2flash' in video_url:
         if mobj:
             video_url = compat_urllib_parse.unquote(mobj.group(1))
             if 'myvideo2flash' in video_url:
-                self._downloader.report_warning(u'forcing RTMPT ...')
-                video_url = video_url.replace('rtmpe://', 'rtmpt://')
+                self.report_warning(
+                    u'Rewriting URL to use unencrypted rtmp:// ...',
+                    video_id)
+                video_url = video_url.replace('rtmpe://', 'rtmp://')
 
         if not video_url:
             # extract non rtmp videos
 
         if not video_url:
             # extract non rtmp videos
diff --git a/youtube_dl/extractor/novamov.py b/youtube_dl/extractor/novamov.py
new file mode 100644 (file)
index 0000000..48ee00d
--- /dev/null
@@ -0,0 +1,62 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_urlparse
+)
+
+
+class NovamovIE(InfoExtractor):
+    _VALID_URL = r'http://(?:(?:www\.)?novamov\.com/video/|(?:(?:embed|www)\.)novamov\.com/embed\.php\?v=)(?P<videoid>[a-z\d]{13})'
+
+    _TEST = {
+        'url': 'http://www.novamov.com/video/4rurhn9x446jj',
+        'file': '4rurhn9x446jj.flv',
+        'md5': '7205f346a52bbeba427603ba10d4b935',
+        'info_dict': {
+            'title': 'search engine optimization',
+            'description': 'search engine optimization is used to rank the web page in the google search engine'
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+
+        page = self._download_webpage('http://www.novamov.com/video/%s' % video_id,
+                                      video_id, 'Downloading video page')
+
+        if re.search(r'This file no longer exists on our servers!</h2>', page) is not None:
+            raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
+
+        filekey = self._search_regex(
+            r'flashvars\.filekey="(?P<filekey>[^"]+)";', page, 'filekey')
+
+        title = self._html_search_regex(
+            r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>',
+            page, 'title', fatal=False)
+
+        description = self._html_search_regex(
+            r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>',
+            page, 'description', fatal=False)
+
+        api_response = self._download_webpage(
+            'http://www.novamov.com/api/player.api.php?key=%s&file=%s' % (filekey, video_id),
+            video_id, 'Downloading video api response')
+
+        response = compat_urlparse.parse_qs(api_response)
+
+        if 'error_msg' in response:
+            raise ExtractorError('novamov returned error: %s' % response['error_msg'][0], expected=True)
+
+        video_url = response['url'][0]
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description
+        }
index 241cc160b9ca58bfc6b88bf9c12fe134df3b3d66..168ca8b9fa961f350fd3d3292bda648589be5197 100644 (file)
@@ -5,7 +5,7 @@ from ..utils import compat_urlparse
 
 
 class NowVideoIE(InfoExtractor):
 
 
 class NowVideoIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://)?(?:www\.)?nowvideo\.ch/video/(?P<id>\w+)'
+    _VALID_URL = r'(?:https?://)?(?:www\.)?nowvideo\.(?:ch|sx)/video/(?P<id>\w+)'
     _TEST = {
         u'url': u'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
         u'file': u'0mw0yow7b6dxa.flv',
     _TEST = {
         u'url': u'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
         u'file': u'0mw0yow7b6dxa.flv',
index b42eae89aca1bdc894e29a876d06e4c5d49564a0..5f5694393765104b45b573c53155d447a45b1e50 100644 (file)
@@ -1,54 +1,98 @@
 # coding: utf-8
 # coding: utf-8
+from __future__ import unicode_literals
 
 
-import re
-import xml.etree.ElementTree
 import json
 import json
+import re
 
 from .common import InfoExtractor
 from ..utils import (
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_urlparse,
-    ExtractorError,
-    find_xpath_attr,
+    HEADRequest,
+    unified_strdate,
 )
 
 )
 
+
 class ORFIE(InfoExtractor):
 class ORFIE(InfoExtractor):
-    _VALID_URL = r'https?://tvthek\.orf\.at/(programs/.+?/episodes|topics/.+?)/(?P<id>\d+)'
+    _VALID_URL = r'https?://tvthek\.orf\.at/(?:programs/.+?/episodes|topics/.+?|program/[^/]+)/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://tvthek.orf.at/program/matinee-Was-Sie-schon-immer-ueber-Klassik-wissen-wollten/7317210/Was-Sie-schon-immer-ueber-Klassik-wissen-wollten/7319746/Was-Sie-schon-immer-ueber-Klassik-wissen-wollten/7319747',
+        'file': '7319747.mp4',
+        'md5': 'bd803c5d8c32d3c64a0ea4b4eeddf375',
+        'info_dict': {
+            'title': 'Was Sie schon immer über Klassik wissen wollten',
+            'description': 'md5:0ddf0d5f0060bd53f744edaa5c2e04a4',
+            'duration': 3508,
+            'upload_date': '20140105',
+        },
+        'skip': 'Blocked outside of Austria',
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         playlist_id = mobj.group('id')
         webpage = self._download_webpage(url, playlist_id)
 
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         playlist_id = mobj.group('id')
         webpage = self._download_webpage(url, playlist_id)
 
-        flash_xml = self._search_regex('ORF.flashXML = \'(.+?)\'', webpage, u'flash xml')
-        flash_xml = compat_urlparse.parse_qs('xml='+flash_xml)['xml'][0]
-        flash_config = xml.etree.ElementTree.fromstring(flash_xml.encode('utf-8'))
-        playlist_json = self._search_regex(r'playlist\': \'(\[.*?\])\'', webpage, u'playlist').replace(r'\"','"')
-        playlist = json.loads(playlist_json)
-
-        videos = []
-        ns = '{http://tempuri.org/XMLSchema.xsd}'
-        xpath = '%(ns)sPlaylist/%(ns)sItems/%(ns)sItem' % {'ns': ns}
-        webpage_description = self._og_search_description(webpage)
-        for (i, (item, info)) in enumerate(zip(flash_config.findall(xpath), playlist), 1):
-            # Get best quality url
-            rtmp_url = None
-            for q in ['Q6A', 'Q4A', 'Q1A']:
-                video_url = find_xpath_attr(item, '%sVideoUrl' % ns, 'quality', q)
-                if video_url is not None:
-                    rtmp_url = video_url.text
-                    break
-            if rtmp_url is None:
-                raise ExtractorError(u'Couldn\'t get video url: %s' % info['id'])
-            description = self._html_search_regex(
-                r'id="playlist_entry_%s".*?<p>(.*?)</p>' % i, webpage,
-                u'description', default=webpage_description, flags=re.DOTALL)
-            videos.append({
+        data_json = self._search_regex(
+            r'initializeAdworx\((.+?)\);\n', webpage, 'video info')
+        all_data = json.loads(data_json)
+        sdata = all_data[0]['values']['segments']
+
+        def quality_to_int(s):
+            m = re.search('([0-9]+)', s)
+            if m is None:
+                return -1
+            return int(m.group(1))
+
+        entries = []
+        for sd in sdata:
+            video_id = sd['id']
+            formats = [{
+                'preference': -10 if fd['delivery'] == 'hls' else None,
+                'format_id': '%s-%s-%s' % (
+                    fd['delivery'], fd['quality'], fd['quality_string']),
+                'url': fd['src'],
+                'protocol': fd['protocol'],
+                'quality': quality_to_int(fd['quality']),
+            } for fd in sd['playlist_item_array']['sources']]
+
+            # Check for geoblocking.
+            # There is a property is_geoprotection, but that's always false
+            geo_str = sd.get('geoprotection_string')
+            if geo_str:
+                try:
+                    http_url = next(
+                        f['url']
+                        for f in formats
+                        if re.match(r'^https?://.*\.mp4$', f['url']))
+                except StopIteration:
+                    pass
+                else:
+                    req = HEADRequest(http_url)
+                    self._request_webpage(
+                        req, video_id,
+                        note='Testing for geoblocking',
+                        errnote=((
+                            'This video seems to be blocked outside of %s. '
+                            'You may want to try the streaming-* formats.')
+                            % geo_str),
+                        fatal=False)
+
+            self._sort_formats(formats)
+
+            upload_date = unified_strdate(sd['created_date'])
+            entries.append({
                 '_type': 'video',
                 '_type': 'video',
-                'id': info['id'],
-                'title': info['title'],
-                'url': rtmp_url,
-                'ext': 'flv',
-                'description': description,
-                })
-
-        return videos
+                'id': video_id,
+                'title': sd['header'],
+                'formats': formats,
+                'description': sd.get('description'),
+                'duration': int(sd['duration_in_seconds']),
+                'upload_date': upload_date,
+                'thumbnail': sd.get('image_full_url'),
+            })
+
+        return {
+            '_type': 'playlist',
+            'entries': entries,
+            'id': playlist_id,
+        }
index 71abd5013bf272ffbb2812a87a6838fd3aeedd7e..e9ff8d1af893c8e22830bbfd41d37b56d5815c9a 100644 (file)
@@ -5,7 +5,7 @@ from ..utils import compat_urllib_parse
 
 
 class PornHdIE(InfoExtractor):
 
 
 class PornHdIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?pornhd\.com/videos/(?P<video_id>[0-9]+)/(?P<video_title>.+)'
+    _VALID_URL = r'(?:http://)?(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<video_id>[0-9]+)/(?P<video_title>.+)'
     _TEST = {
         u'url': u'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
         u'file': u'1962.flv',
     _TEST = {
         u'url': u'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
         u'file': u'1962.flv',
index d9135c6b929765e87b13e58f6fd6af5567c55199..fdda69f33064fe4bd4e89775c5e3c7d56e4946d2 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 
 import os
 import re
 
@@ -11,16 +13,17 @@ from ..aes import (
     aes_decrypt_text
 )
 
     aes_decrypt_text
 )
 
+
 class PornHubIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
     _TEST = {
 class PornHubIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
     _TEST = {
-        u'url': u'http://www.pornhub.com/view_video.php?viewkey=648719015',
-        u'file': u'648719015.mp4',
-        u'md5': u'882f488fa1f0026f023f33576004a2ed',
-        u'info_dict': {
-            u"uploader": u"BABES-COM", 
-            u"title": u"Seductive Indian beauty strips down and fingers her pink pussy",
-            u"age_limit": 18
+        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
+        'file': '648719015.mp4',
+        'md5': '882f488fa1f0026f023f33576004a2ed',
+        'info_dict': {
+            "uploader": "BABES-COM",
+            "title": "Seductive Indian beauty strips down and fingers her pink pussy",
+            "age_limit": 18
         }
     }
 
         }
     }
 
@@ -33,15 +36,15 @@ class PornHubIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, u'title')
-        video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False)
-        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False)
+        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
+        video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, 'uploader', fatal=False)
+        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
         if thumbnail:
             thumbnail = compat_urllib_parse.unquote(thumbnail)
 
         video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
         if webpage.find('"encrypted":true') != -1:
         if thumbnail:
             thumbnail = compat_urllib_parse.unquote(thumbnail)
 
         video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
         if webpage.find('"encrypted":true') != -1:
-            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password').replace('+', ' ')
+            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password').replace('+', ' ')
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
 
         formats = []
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
 
         formats = []
@@ -50,13 +53,24 @@ class PornHubIE(InfoExtractor):
             extension = os.path.splitext(path)[1][1:]
             format = path.split('/')[5].split('_')[:2]
             format = "-".join(format)
             extension = os.path.splitext(path)[1][1:]
             format = path.split('/')[5].split('_')[:2]
             format = "-".join(format)
+
+            m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format)
+            if m is None:
+                height = None
+                tbr = None
+            else:
+                height = int(m.group('height'))
+                tbr = int(m.group('tbr'))
+
             formats.append({
                 'url': video_url,
                 'ext': extension,
                 'format': format,
                 'format_id': format,
             formats.append({
                 'url': video_url,
                 'ext': extension,
                 'format': format,
                 'format_id': format,
+                'tbr': tbr,
+                'height': height,
             })
             })
-        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index c2254ae8abdca2ab9dde2388fb2182b056ffd0e2..5c4cd20687a5745982bdce0d43417297bca0bc28 100644 (file)
@@ -4,7 +4,7 @@ from .common import InfoExtractor
 
 
 class RedTubeIE(InfoExtractor):
 
 
 class RedTubeIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
+    _VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
     _TEST = {
         u'url': u'http://www.redtube.com/66418',
         u'file': u'66418.mp4',
     _TEST = {
         u'url': u'http://www.redtube.com/66418',
         u'file': u'66418.mp4',
index ccf0b1546452bbe85837ca1de837f7321a0bec0c..a43d6ced57ef454ada80bc47d793f05fec349896 100644 (file)
@@ -39,7 +39,7 @@ class RTLnowIE(InfoExtractor):
         u'skip': u'Only works from Germany',
     },
     {
         u'skip': u'Only works from Germany',
     },
     {
-        u'url': u'www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17',
+        u'url': u'http://www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17',
         u'file': u'13883.flv',
         u'info_dict': {
             u'upload_date': u'20090627', 
         u'file': u'13883.flv',
         u'info_dict': {
             u'upload_date': u'20090627', 
index beea58d6317727133f85b74c14097445cf785dc5..99f5b19d2dd68e78aaf2f45f79fad60b8bb459dc 100644 (file)
@@ -138,7 +138,7 @@ class SmotriIE(InfoExtractor):
         # Warning if video is unavailable
         warning = self._html_search_regex(
             r'<div class="videoUnModer">(.*?)</div>', video_page,
         # Warning if video is unavailable
         warning = self._html_search_regex(
             r'<div class="videoUnModer">(.*?)</div>', video_page,
-            u'warning messagef', default=None)
+            u'warning message', default=None)
         if warning is not None:
             self._downloader.report_warning(
                 u'Video %s may not be available; smotri said: %s ' %
         if warning is not None:
             self._downloader.report_warning(
                 u'Video %s may not be available; smotri said: %s ' %
index e22ff9c387ab0e01c1e6fcb1da793af877f37a5c..393b5f17c53d5ed216b53ac0d1ff2941cb1d24f7 100644 (file)
@@ -1,4 +1,6 @@
 # encoding: utf-8
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import json
 import re
 import itertools
 import json
 import re
 import itertools
@@ -29,61 +31,61 @@ class SoundcloudIE(InfoExtractor):
                             (?!sets/)(?P<title>[\w\d-]+)/?
                             (?P<token>[^?]+?)?(?:[?].*)?$)
                        |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+))
                             (?!sets/)(?P<title>[\w\d-]+)/?
                             (?P<token>[^?]+?)?(?:[?].*)?$)
                        |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+))
-                       |(?P<widget>w\.soundcloud\.com/player/?.*?url=.*)
+                       |(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
                     )
                     '''
                     )
                     '''
-    IE_NAME = u'soundcloud'
+    IE_NAME = 'soundcloud'
     _TESTS = [
         {
     _TESTS = [
         {
-            u'url': u'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
-            u'file': u'62986583.mp3',
-            u'md5': u'ebef0a451b909710ed1d7787dddbf0d7',
-            u'info_dict': {
-                u"upload_date": u"20121011", 
-                u"description": u"No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd", 
-                u"uploader": u"E.T. ExTerrestrial Music", 
-                u"title": u"Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1"
+            'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
+            'file': '62986583.mp3',
+            'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
+            'info_dict': {
+                "upload_date": "20121011",
+                "description": "No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
+                "uploader": "E.T. ExTerrestrial Music",
+                "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1"
             }
         },
         # not streamable song
         {
             }
         },
         # not streamable song
         {
-            u'url': u'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
-            u'info_dict': {
-                u'id': u'47127627',
-                u'ext': u'mp3',
-                u'title': u'Goldrushed',
-                u'uploader': u'The Royal Concept',
-                u'upload_date': u'20120521',
+            'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
+            'info_dict': {
+                'id': '47127627',
+                'ext': 'mp3',
+                'title': 'Goldrushed',
+                'uploader': 'The Royal Concept',
+                'upload_date': '20120521',
             },
             },
-            u'params': {
+            'params': {
                 # rtmp
                 # rtmp
-                u'skip_download': True,
+                'skip_download': True,
             },
         },
         # private link
         {
             },
         },
         # private link
         {
-            u'url': u'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
-            u'md5': u'aa0dd32bfea9b0c5ef4f02aacd080604',
-            u'info_dict': {
-                u'id': u'123998367',
-                u'ext': u'mp3',
-                u'title': u'Youtube - Dl Test Video \'\' Ä↭',
-                u'uploader': u'jaimeMF',
-                u'description': u'test chars:  \"\'/\\ä↭',
-                u'upload_date': u'20131209',
+            'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
+            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
+            'info_dict': {
+                'id': '123998367',
+                'ext': 'mp3',
+                'title': 'Youtube - Dl Test Video \'\' Ä↭',
+                'uploader': 'jaimeMF',
+                'description': 'test chars:  \"\'/\\ä↭',
+                'upload_date': '20131209',
             },
         },
         # downloadable song
         {
             },
         },
         # downloadable song
         {
-            u'url': u'https://soundcloud.com/simgretina/just-your-problem-baby-1',
-            u'md5': u'56a8b69568acaa967b4c49f9d1d52d19',
-            u'info_dict': {
-                u'id': u'105614606',
-                u'ext': u'wav',
-                u'title': u'Just Your Problem Baby (Acapella)',
-                u'description': u'Vocals',
-                u'uploader': u'Sim Gretina',
-                u'upload_date': u'20130815',
+            'url': 'https://soundcloud.com/simgretina/just-your-problem-baby-1',
+            'md5': '56a8b69568acaa967b4c49f9d1d52d19',
+            'info_dict': {
+                'id': '105614606',
+                'ext': 'wav',
+                'title': 'Just Your Problem Baby (Acapella)',
+                'description': 'Vocals',
+                'uploader': 'Sim Gretina',
+                'upload_date': '20130815',
             },
         },
     ]
             },
         },
     ]
@@ -112,7 +114,7 @@ class SoundcloudIE(InfoExtractor):
         thumbnail = info['artwork_url']
         if thumbnail is not None:
             thumbnail = thumbnail.replace('-large', '-t500x500')
         thumbnail = info['artwork_url']
         if thumbnail is not None:
             thumbnail = thumbnail.replace('-large', '-t500x500')
-        ext = u'mp3'
+        ext = 'mp3'
         result = {
             'id': track_id,
             'uploader': info['user']['username'],
         result = {
             'id': track_id,
             'uploader': info['user']['username'],
@@ -124,11 +126,11 @@ class SoundcloudIE(InfoExtractor):
         if info.get('downloadable', False):
             # We can build a direct link to the song
             format_url = (
         if info.get('downloadable', False):
             # We can build a direct link to the song
             format_url = (
-                u'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(
+                'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(
                     track_id, self._CLIENT_ID))
             result['formats'] = [{
                 'format_id': 'download',
                     track_id, self._CLIENT_ID))
             result['formats'] = [{
                 'format_id': 'download',
-                'ext': info.get('original_format', u'mp3'),
+                'ext': info.get('original_format', 'mp3'),
                 'url': format_url,
                 'vcodec': 'none',
             }]
                 'url': format_url,
                 'vcodec': 'none',
             }]
@@ -138,7 +140,7 @@ class SoundcloudIE(InfoExtractor):
                 'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
             stream_json = self._download_webpage(
                 streams_url,
                 'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
             stream_json = self._download_webpage(
                 streams_url,
-                track_id, u'Downloading track url')
+                track_id, 'Downloading track url')
 
             formats = []
             format_dict = json.loads(stream_json)
 
             formats = []
             format_dict = json.loads(stream_json)
@@ -165,20 +167,19 @@ class SoundcloudIE(InfoExtractor):
                 # We fallback to the stream_url in the original info, this
                 # cannot be always used, sometimes it can give an HTTP 404 error
                 formats.append({
                 # We fallback to the stream_url in the original info, this
                 # cannot be always used, sometimes it can give an HTTP 404 error
                 formats.append({
-                    'format_id': u'fallback',
+                    'format_id': 'fallback',
                     'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID,
                     'ext': ext,
                     'vcodec': 'none',
                 })
 
                     'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID,
                     'ext': ext,
                     'vcodec': 'none',
                 })
 
-            def format_pref(f):
+            for f in formats:
                 if f['format_id'].startswith('http'):
                 if f['format_id'].startswith('http'):
-                    return 2
+                    f['protocol'] = 'http'
                 if f['format_id'].startswith('rtmp'):
                 if f['format_id'].startswith('rtmp'):
-                    return 1
-                return 0
+                    f['protocol'] = 'rtmp'
 
 
-            formats.sort(key=format_pref)
+            self._sort_formats(formats)
             result['formats'] = formats
 
         return result
             result['formats'] = formats
 
         return result
@@ -193,7 +194,7 @@ class SoundcloudIE(InfoExtractor):
         if track_id is not None:
             info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
             full_title = track_id
         if track_id is not None:
             info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
             full_title = track_id
-        elif mobj.group('widget'):
+        elif mobj.group('player'):
             query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
             return self.url_result(query['url'][0], ie='Soundcloud')
         else:
             query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
             return self.url_result(query['url'][0], ie='Soundcloud')
         else:
@@ -210,14 +211,14 @@ class SoundcloudIE(InfoExtractor):
     
             url = 'http://soundcloud.com/%s' % resolve_title
             info_json_url = self._resolv_url(url)
     
             url = 'http://soundcloud.com/%s' % resolve_title
             info_json_url = self._resolv_url(url)
-        info_json = self._download_webpage(info_json_url, full_title, u'Downloading info JSON')
+        info_json = self._download_webpage(info_json_url, full_title, 'Downloading info JSON')
 
         info = json.loads(info_json)
         return self._extract_info_dict(info, full_title, secret_token=token)
 
 class SoundcloudSetIE(SoundcloudIE):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)(?:[?].*)?$'
 
         info = json.loads(info_json)
         return self._extract_info_dict(info, full_title, secret_token=token)
 
 class SoundcloudSetIE(SoundcloudIE):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)(?:[?].*)?$'
-    IE_NAME = u'soundcloud:set'
+    IE_NAME = 'soundcloud:set'
     # it's in tests/test_playlists.py
     _TESTS = []
 
     # it's in tests/test_playlists.py
     _TESTS = []
 
@@ -254,7 +255,7 @@ class SoundcloudSetIE(SoundcloudIE):
 
 class SoundcloudUserIE(SoundcloudIE):
     _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)(/?(tracks/)?)?(\?.*)?$'
 
 class SoundcloudUserIE(SoundcloudIE):
     _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)(/?(tracks/)?)?(\?.*)?$'
-    IE_NAME = u'soundcloud:user'
+    IE_NAME = 'soundcloud:user'
 
     # it's in tests/test_playlists.py
     _TESTS = []
 
     # it's in tests/test_playlists.py
     _TESTS = []
@@ -266,7 +267,7 @@ class SoundcloudUserIE(SoundcloudIE):
         url = 'http://soundcloud.com/%s/' % uploader
         resolv_url = self._resolv_url(url)
         user_json = self._download_webpage(resolv_url, uploader,
         url = 'http://soundcloud.com/%s/' % uploader
         resolv_url = self._resolv_url(url)
         user_json = self._download_webpage(resolv_url, uploader,
-            u'Downloading user info')
+            'Downloading user info')
         user = json.loads(user_json)
 
         tracks = []
         user = json.loads(user_json)
 
         tracks = []
@@ -276,7 +277,7 @@ class SoundcloudUserIE(SoundcloudIE):
                                                   })
             tracks_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % user['id'] + data
             response = self._download_webpage(tracks_url, uploader, 
                                                   })
             tracks_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % user['id'] + data
             response = self._download_webpage(tracks_url, uploader, 
-                u'Downloading tracks page %s' % (i+1))
+                'Downloading tracks page %s' % (i+1))
             new_tracks = json.loads(response)
             tracks.extend(self._extract_info_dict(track, quiet=True) for track in new_tracks)
             if len(new_tracks) < 50:
             new_tracks = json.loads(response)
             tracks.extend(self._extract_info_dict(track, quiet=True) for track in new_tracks)
             if len(new_tracks) < 50:
index 9e2ad0d9962c375ca27851b3f842de302be28e56..3362b3db85c65c97d8839d8707e36fecf8bc6646 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 
 import os
 import re
 
@@ -11,17 +13,18 @@ from ..aes import (
     aes_decrypt_text
 )
 
     aes_decrypt_text
 )
 
+
 class SpankwireIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
     _TEST = {
 class SpankwireIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
     _TEST = {
-        u'url': u'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
-        u'file': u'103545.mp4',
-        u'md5': u'1b3f55e345500552dbc252a3e9c1af43',
-        u'info_dict': {
-            u"uploader": u"oreusz", 
-            u"title": u"Buckcherry`s X Rated Music Video Crazy Bitch",
-            u"description": u"Crazy Bitch X rated music video.",
-            u"age_limit": 18,
+        'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
+        'file': '103545.mp4',
+        'md5': '1b3f55e345500552dbc252a3e9c1af43',
+        'info_dict': {
+            "uploader": "oreusz",
+            "title": "Buckcherry`s X Rated Music Video Crazy Bitch",
+            "description": "Crazy Bitch X rated music video.",
+            "age_limit": 18,
         }
     }
 
         }
     }
 
@@ -34,17 +37,17 @@ class SpankwireIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)
 
-        video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, u'title')
+        video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
         video_uploader = self._html_search_regex(
         video_uploader = self._html_search_regex(
-            r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False)
+            r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False)
         thumbnail = self._html_search_regex(
         thumbnail = self._html_search_regex(
-            r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False)
+            r'flashvars\.image_url = "([^"]+)', webpage, 'thumbnail', fatal=False)
         description = self._html_search_regex(
         description = self._html_search_regex(
-            r'<div\s+id="descriptionContent">([^<]+)<', webpage, u'description', fatal=False)
+            r'<div\s+id="descriptionContent">([^<]+)<', webpage, 'description', fatal=False)
 
         video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
         if webpage.find('flashvars\.encrypted = "true"') != -1:
 
         video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
         if webpage.find('flashvars\.encrypted = "true"') != -1:
-            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, u'password').replace('+', ' ')
+            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ')
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
 
         formats = []
             video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
 
         formats = []
@@ -52,14 +55,21 @@ class SpankwireIE(InfoExtractor):
             path = compat_urllib_parse_urlparse(video_url).path
             extension = os.path.splitext(path)[1][1:]
             format = path.split('/')[4].split('_')[:2]
             path = compat_urllib_parse_urlparse(video_url).path
             extension = os.path.splitext(path)[1][1:]
             format = path.split('/')[4].split('_')[:2]
+            resolution, bitrate_str = format
             format = "-".join(format)
             format = "-".join(format)
+            height = int(resolution.rstrip('P'))
+            tbr = int(bitrate_str.rstrip('K'))
+
             formats.append({
                 'url': video_url,
                 'ext': extension,
             formats.append({
                 'url': video_url,
                 'ext': extension,
+                'resolution': resolution,
                 'format': format,
                 'format': format,
+                'tbr': tbr,
+                'height': height,
                 'format_id': format,
             })
                 'format_id': format,
             })
-        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
+        self._sort_formats(formats)
 
         age_limit = self._rta_search(webpage)
 
 
         age_limit = self._rta_search(webpage)
 
index 6955205242dcbbba01cfac482d362b3ca292b6b6..051a34d5b8b048db9112ac2df5f3f04115447a87 100644 (file)
@@ -51,9 +51,10 @@ class SpiegelIE(InfoExtractor):
             # Blacklist type 6, it's extremely LQ and not available on the same server
             if n.tag.startswith('type') and n.tag != 'type6'
         ]
             # Blacklist type 6, it's extremely LQ and not available on the same server
             if n.tag.startswith('type') and n.tag != 'type6'
         ]
-        formats.sort(key=lambda f: f['vbr'])
         duration = float(idoc[0].findall('./duration')[0].text)
 
         duration = float(idoc[0].findall('./duration')[0].text)
 
+        self._sort_formats(formats)
+
         info = {
             'id': video_id,
             'title': video_title,
         info = {
             'id': video_id,
             'title': video_title,
index 2bf26d05682e8e2535d412c2718d78bec6077622..9dcffead04d5466c14c6f2ff60995ecfb5435e6d 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
 import re
 
 from .common import InfoExtractor
@@ -9,61 +11,66 @@ from ..utils import (
 class TeamcocoIE(InfoExtractor):
     _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'
     _TEST = {
 class TeamcocoIE(InfoExtractor):
     _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'
     _TEST = {
-        u'url': u'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
-        u'file': u'19705.mp4',
-        u'md5': u'cde9ba0fa3506f5f017ce11ead928f9a',
-        u'info_dict': {
-            u"description": u"Louis C.K. got starstruck by George W. Bush, so what? Part one.", 
-            u"title": u"Louis C.K. Interview Pt. 1 11/3/11"
+        'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
+        'file': '19705.mp4',
+        'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
+        'info_dict': {
+            "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.",
+            "title": "Louis C.K. Interview Pt. 1 11/3/11"
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)
         url_title = mobj.group('url_title')
         webpage = self._download_webpage(url, url_title)
 
         url_title = mobj.group('url_title')
         webpage = self._download_webpage(url, url_title)
 
-        video_id = self._html_search_regex(r'<article class="video" data-id="(\d+?)"',
-            webpage, u'video id')
+        video_id = self._html_search_regex(
+            r'<article class="video" data-id="(\d+?)"',
+            webpage, 'video id')
 
         self.report_extraction(video_id)
 
         data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
         data = self._download_xml(data_url, video_id, 'Downloading data webpage')
 
 
         self.report_extraction(video_id)
 
         data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
         data = self._download_xml(data_url, video_id, 'Downloading data webpage')
 
-
         qualities = ['500k', '480p', '1000k', '720p', '1080p']
         formats = []
         qualities = ['500k', '480p', '1000k', '720p', '1080p']
         formats = []
-        for file in data.findall('files/file'):
-            if file.attrib.get('playmode') == 'all':
+        for filed in data.findall('files/file'):
+            if filed.attrib.get('playmode') == 'all':
                 # it just duplicates one of the entries
                 break
                 # it just duplicates one of the entries
                 break
-            file_url = file.text
+            file_url = filed.text
             m_format = re.search(r'(\d+(k|p))\.mp4', file_url)
             if m_format is not None:
                 format_id = m_format.group(1)
             else:
             m_format = re.search(r'(\d+(k|p))\.mp4', file_url)
             if m_format is not None:
                 format_id = m_format.group(1)
             else:
-                format_id = file.attrib['bitrate']
+                format_id = filed.attrib['bitrate']
+            tbr = (
+                int(filed.attrib['bitrate'])
+                if filed.attrib['bitrate'].isdigit()
+                else None)
+
+            try:
+                quality = qualities.index(format_id)
+            except ValueError:
+                quality = -1
             formats.append({
                 'url': file_url,
                 'ext': 'mp4',
             formats.append({
                 'url': file_url,
                 'ext': 'mp4',
+                'tbr': tbr,
                 'format_id': format_id,
                 'format_id': format_id,
+                'quality': quality,
             })
             })
-        def sort_key(f):
-            try:
-                return qualities.index(f['format_id'])
-            except ValueError:
-                return -1
-        formats.sort(key=sort_key)
-        if not formats:
-            raise ExtractorError(u'Unable to extract video URL')
+
+        self._sort_formats(formats)
 
         return {
 
         return {
-            'id':          video_id,
+            'id': video_id,
             'formats': formats,
             'formats': formats,
-            'title':       self._og_search_title(webpage),
-            'thumbnail':   self._og_search_thumbnail(webpage),
+            'title': self._og_search_title(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
             'description': self._og_search_description(webpage),
         }
             'description': self._og_search_description(webpage),
         }
index 4bca62ba003e325ebedd0fcc74c953bd64120cd5..8b31caa92c1e44473aa42953427b3cc2d71762f7 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import json
 import re
 
 import json
 import re
 
@@ -7,6 +9,7 @@ from ..utils import (
     RegexNotFoundError,
 )
 
     RegexNotFoundError,
 )
 
+
 class TEDIE(SubtitlesInfoExtractor):
     _VALID_URL=r'''http://www\.ted\.com/
                    (
 class TEDIE(SubtitlesInfoExtractor):
     _VALID_URL=r'''http://www\.ted\.com/
                    (
@@ -18,12 +21,12 @@ class TEDIE(SubtitlesInfoExtractor):
                    /(?P<name>\w+) # Here goes the name and then ".html"
                    '''
     _TEST = {
                    /(?P<name>\w+) # Here goes the name and then ".html"
                    '''
     _TEST = {
-        u'url': u'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
-        u'file': u'102.mp4',
-        u'md5': u'2d76ee1576672e0bd8f187513267adf6',
-        u'info_dict': {
-            u"description": u"md5:c6fa72e6eedbd938c9caf6b2702f5922", 
-            u"title": u"Dan Dennett: The illusion of consciousness"
+        'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
+        'file': '102.mp4',
+        'md5': '4ea1dada91e4174b53dac2bb8ace429d',
+        'info_dict': {
+            "description": "md5:c6fa72e6eedbd938c9caf6b2702f5922",
+            "title": "Dan Dennett: The illusion of consciousness"
         }
     }
 
         }
     }
 
@@ -47,7 +50,7 @@ class TEDIE(SubtitlesInfoExtractor):
         '''Returns the videos of the playlist'''
 
         webpage = self._download_webpage(
         '''Returns the videos of the playlist'''
 
         webpage = self._download_webpage(
-            url, playlist_id, u'Downloading playlist webpage')
+            url, playlist_id, 'Downloading playlist webpage')
         matches = re.finditer(
             r'<p\s+class="talk-title[^"]*"><a\s+href="(?P<talk_url>/talks/[^"]+\.html)">[^<]*</a></p>',
             webpage)
         matches = re.finditer(
             r'<p\s+class="talk-title[^"]*"><a\s+href="(?P<talk_url>/talks/[^"]+\.html)">[^<]*</a></p>',
             webpage)
index cec65261bfffd2a25702634047a99526fa3a7d10..23172143ec41ecc48e88c333dfdd476db330e1b8 100644 (file)
@@ -55,15 +55,21 @@ class ThePlatformIE(InfoExtractor):
         formats = []
         for f in switch.findall(_x('smil:video')):
             attr = f.attrib
         formats = []
         for f in switch.findall(_x('smil:video')):
             attr = f.attrib
+            width = int(attr['width'])
+            height = int(attr['height'])
+            vbr = int(attr['system-bitrate']) // 1000
+            format_id = '%dx%d_%dk' % (width, height, vbr)
             formats.append({
             formats.append({
+                'format_id': format_id,
                 'url': base_url,
                 'play_path': 'mp4:' + attr['src'],
                 'ext': 'flv',
                 'url': base_url,
                 'play_path': 'mp4:' + attr['src'],
                 'ext': 'flv',
-                'width': int(attr['width']),
-                'height': int(attr['height']),
-                'vbr': int(attr['system-bitrate']),
+                'width': width,
+                'height': height,
+                'vbr': vbr,
             })
             })
-        formats.sort(key=lambda f: (f['height'], f['width'], f['vbr']))
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index 3cf8c853d2e466e00228d7eb3cb0f33d664beb9b..b1c854a646c601d4dadaa1dce7fab8d6fc315b3d 100644 (file)
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 import re
 import json
 
@@ -8,16 +10,17 @@ from ..utils import (
     clean_html,
 )
 
     clean_html,
 )
 
+
 class VeeHDIE(InfoExtractor):
     _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)'
 
     _TEST = {
 class VeeHDIE(InfoExtractor):
     _VALID_URL = r'https?://veehd\.com/video/(?P<id>\d+)'
 
     _TEST = {
-        u'url': u'http://veehd.com/video/4686958',
-        u'file': u'4686958.mp4',
-        u'info_dict': {
-            u'title': u'Time Lapse View from Space ( ISS)',
-            u'uploader_id': u'spotted',
-            u'description': u'md5:f0094c4cf3a72e22bc4e4239ef767ad7',
+        'url': 'http://veehd.com/video/4686958',
+        'file': '4686958.mp4',
+        'info_dict': {
+            'title': 'Time Lapse View from Space ( ISS)',
+            'uploader_id': 'spotted',
+            'description': 'md5:f0094c4cf3a72e22bc4e4239ef767ad7',
         },
     }
 
         },
     }
 
@@ -25,24 +28,30 @@ class VeeHDIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
 
+        # VeeHD seems to send garbage on the first request.
+        # See https://github.com/rg3/youtube-dl/issues/2102
+        self._download_webpage(url, video_id, 'Requesting webpage')
         webpage = self._download_webpage(url, video_id)
         webpage = self._download_webpage(url, video_id)
-        player_path = self._search_regex(r'\$\("#playeriframe"\).attr\({src : "(.+?)"',
-            webpage, u'player path')
+        player_path = self._search_regex(
+            r'\$\("#playeriframe"\).attr\({src : "(.+?)"',
+            webpage, 'player path')
         player_url = compat_urlparse.urljoin(url, player_path)
         player_url = compat_urlparse.urljoin(url, player_path)
-        player_page = self._download_webpage(player_url, video_id,
-            u'Downloading player page')
-        config_json = self._search_regex(r'value=\'config=({.+?})\'',
-            player_page, u'config json')
+
+        self._download_webpage(player_url, video_id, 'Requesting player page')
+        player_page = self._download_webpage(
+            player_url, video_id, 'Downloading player page')
+        config_json = self._search_regex(
+            r'value=\'config=({.+?})\'', player_page, 'config json')
         config = json.loads(config_json)
 
         video_url = compat_urlparse.unquote(config['clip']['url'])
         title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
         uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
         config = json.loads(config_json)
 
         video_url = compat_urlparse.unquote(config['clip']['url'])
         title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])
         uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>',
-            webpage, u'uploader')
+            webpage, 'uploader')
         thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
         thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"',
-            webpage, u'thumbnail')
+            webpage, 'thumbnail')
         description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
         description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul',
-            webpage, u'description', flags=re.DOTALL)
+            webpage, 'description', flags=re.DOTALL)
 
         return {
             '_type': 'video',
 
         return {
             '_type': 'video',
index 00672c9e5f4bdb5f2c33112bdee4d97022b24e9f..baa57f3438603e6a5d0d2b406389df4110353ebe 100644 (file)
@@ -1,22 +1,22 @@
+from __future__ import unicode_literals
+
 import re
 import json
 
 from .common import InfoExtractor
 import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-)
+
 
 class VeohIE(InfoExtractor):
 
 class VeohIE(InfoExtractor):
-    _VALID_URL = r'http://www\.veoh\.com/watch/v(?P<id>\d*)'
+    _VALID_URL = r'http://(?:www\.)?veoh\.com/(?:watch|iphone/#_Watch)/v(?P<id>\d*)'
 
     _TEST = {
 
     _TEST = {
-        u'url': u'http://www.veoh.com/watch/v56314296nk7Zdmz3',
-        u'file': u'56314296.mp4',
-        u'md5': u'620e68e6a3cff80086df3348426c9ca3',
-        u'info_dict': {
-            u'title': u'Straight Backs Are Stronger',
-            u'uploader': u'LUMOback',
-            u'description': u'At LUMOback, we believe straight backs are stronger.  The LUMOback Posture & Movement Sensor:  It gently vibrates when you slouch, inspiring improved posture and mobility.  Use the app to track your data and improve your posture over time. ',
+        'url': 'http://www.veoh.com/watch/v56314296nk7Zdmz3',
+        'file': '56314296.mp4',
+        'md5': '620e68e6a3cff80086df3348426c9ca3',
+        'info_dict': {
+            'title': 'Straight Backs Are Stronger',
+            'uploader': 'LUMOback',
+            'description': 'At LUMOback, we believe straight backs are stronger.  The LUMOback Posture & Movement Sensor:  It gently vibrates when you slouch, inspiring improved posture and mobility.  Use the app to track your data and improve your posture over time. ',
         }
     }
 
         }
     }
 
@@ -28,20 +28,20 @@ class VeohIE(InfoExtractor):
         m_youtube = re.search(r'http://www\.youtube\.com/v/(.*?)(\&|")', webpage)
         if m_youtube is not None:
             youtube_id = m_youtube.group(1)
         m_youtube = re.search(r'http://www\.youtube\.com/v/(.*?)(\&|")', webpage)
         if m_youtube is not None:
             youtube_id = m_youtube.group(1)
-            self.to_screen(u'%s: detected Youtube video.' % video_id)
+            self.to_screen('%s: detected Youtube video.' % video_id)
             return self.url_result(youtube_id, 'Youtube')
 
         self.report_extraction(video_id)
         info = self._search_regex(r'videoDetailsJSON = \'({.*?})\';', webpage, 'info')
         info = json.loads(info)
             return self.url_result(youtube_id, 'Youtube')
 
         self.report_extraction(video_id)
         info = self._search_regex(r'videoDetailsJSON = \'({.*?})\';', webpage, 'info')
         info = json.loads(info)
-        video_url =  info.get('fullPreviewHashHighPath') or info.get('fullPreviewHashLowPath')
-
-        return {'id': info['videoId'], 
-                'title': info['title'],
-                'ext': determine_ext(video_url),
-                'url': video_url,
-                'uploader': info['username'],
-                'thumbnail': info.get('highResImage') or info.get('medResImage'),
-                'description': info['description'],
-                'view_count': info['views'],
-                }
+        video_url = info.get('fullPreviewHashHighPath') or info.get('fullPreviewHashLowPath')
+
+        return {
+            'id': info['videoId'],
+            'title': info['title'],
+            'url': video_url,
+            'uploader': info['username'],
+            'thumbnail': info.get('highResImage') or info.get('medResImage'),
+            'description': info['description'],
+            'view_count': info['views'],
+        }
index c3623fcbe6b01493c5ec2115f4fe5f2d32737e59..1936755499ceca890c7b4513298a64444293b71d 100644 (file)
@@ -1,4 +1,6 @@
 # encoding: utf-8
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import json
 import re
 import itertools
 import json
 import re
 import itertools
@@ -22,7 +24,7 @@ class VimeoIE(InfoExtractor):
 
     # _VALID_URL matches Vimeo URLs
     _VALID_URL = r'''(?x)
 
     # _VALID_URL matches Vimeo URLs
     _VALID_URL = r'''(?x)
-        (?P<proto>https?://)?
+        (?P<proto>(?:https?:)?//)?
         (?:(?:www|(?P<player>player))\.)?
         vimeo(?P<pro>pro)?\.com/
         (?:.*?/)?
         (?:(?:www|(?P<player>player))\.)?
         vimeo(?P<pro>pro)?\.com/
         (?:.*?/)?
@@ -31,54 +33,55 @@ class VimeoIE(InfoExtractor):
         (?P<id>[0-9]+)
         /?(?:[?&].*)?(?:[#].*)?$'''
     _NETRC_MACHINE = 'vimeo'
         (?P<id>[0-9]+)
         /?(?:[?&].*)?(?:[#].*)?$'''
     _NETRC_MACHINE = 'vimeo'
-    IE_NAME = u'vimeo'
+    IE_NAME = 'vimeo'
     _TESTS = [
         {
     _TESTS = [
         {
-            u'url': u'http://vimeo.com/56015672#at=0',
-            u'file': u'56015672.mp4',
-            u'md5': u'8879b6cc097e987f02484baf890129e5',
-            u'info_dict': {
-                u"upload_date": u"20121220", 
-                u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", 
-                u"uploader_id": u"user7108434", 
-                u"uploader": u"Filippo Valsorda", 
-                u"title": u"youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
+            'url': 'http://vimeo.com/56015672#at=0',
+            'file': '56015672.mp4',
+            'md5': '8879b6cc097e987f02484baf890129e5',
+            'info_dict': {
+                "upload_date": "20121220", 
+                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", 
+                "uploader_id": "user7108434", 
+                "uploader": "Filippo Valsorda", 
+                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
             },
         },
         {
             },
         },
         {
-            u'url': u'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
-            u'file': u'68093876.mp4',
-            u'md5': u'3b5ca6aa22b60dfeeadf50b72e44ed82',
-            u'note': u'Vimeo Pro video (#1197)',
-            u'info_dict': {
-                u'uploader_id': u'openstreetmapus', 
-                u'uploader': u'OpenStreetMap US', 
-                u'title': u'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
+            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
+            'file': '68093876.mp4',
+            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
+            'note': 'Vimeo Pro video (#1197)',
+            'info_dict': {
+                'uploader_id': 'openstreetmapus',
+                'uploader': 'OpenStreetMap US',
+                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
             },
         },
         {
             },
         },
         {
-            u'url': u'http://player.vimeo.com/video/54469442',
-            u'file': u'54469442.mp4',
-            u'md5': u'619b811a4417aa4abe78dc653becf511',
-            u'note': u'Videos that embed the url in the player page',
-            u'info_dict': {
-                u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software',
-                u'uploader': u'The BLN & Business of Software',
+            'url': 'http://player.vimeo.com/video/54469442',
+            'file': '54469442.mp4',
+            'md5': '619b811a4417aa4abe78dc653becf511',
+            'note': 'Videos that embed the url in the player page',
+            'info_dict': {
+                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software',
+                'uploader': 'The BLN & Business of Software',
+                'uploader_id': 'theblnbusinessofsoftware',
             },
         },
         {
             },
         },
         {
-            u'url': u'http://vimeo.com/68375962',
-            u'file': u'68375962.mp4',
-            u'md5': u'aaf896bdb7ddd6476df50007a0ac0ae7',
-            u'note': u'Video protected with password',
-            u'info_dict': {
-                u'title': u'youtube-dl password protected test video',
-                u'upload_date': u'20130614',
-                u'uploader_id': u'user18948128',
-                u'uploader': u'Jaime Marquínez Ferrándiz',
+            'url': 'http://vimeo.com/68375962',
+            'file': '68375962.mp4',
+            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
+            'note': 'Video protected with password',
+            'info_dict': {
+                'title': 'youtube-dl password protected test video',
+                'upload_date': '20130614',
+                'uploader_id': 'user18948128',
+                'uploader': 'Jaime Marquínez Ferrándiz',
             },
             },
-            u'params': {
-                u'videopassword': u'youtube-dl',
+            'params': {
+                'videopassword': 'youtube-dl',
             },
         },
     ]
             },
         },
     ]
@@ -90,7 +93,7 @@ class VimeoIE(InfoExtractor):
         self.report_login()
         login_url = 'https://vimeo.com/log_in'
         webpage = self._download_webpage(login_url, None, False)
         self.report_login()
         login_url = 'https://vimeo.com/log_in'
         webpage = self._download_webpage(login_url, None, False)
-        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
         data = compat_urllib_parse.urlencode({'email': username,
                                               'password': password,
                                               'action': 'login',
         data = compat_urllib_parse.urlencode({'email': username,
                                               'password': password,
                                               'action': 'login',
@@ -100,13 +103,13 @@ class VimeoIE(InfoExtractor):
         login_request = compat_urllib_request.Request(login_url, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_request.add_header('Cookie', 'xsrft=%s' % token)
         login_request = compat_urllib_request.Request(login_url, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_request.add_header('Cookie', 'xsrft=%s' % token)
-        self._download_webpage(login_request, None, False, u'Wrong login info')
+        self._download_webpage(login_request, None, False, 'Wrong login info')
 
     def _verify_video_password(self, url, video_id, webpage):
         password = self._downloader.params.get('videopassword', None)
         if password is None:
 
     def _verify_video_password(self, url, video_id, webpage):
         password = self._downloader.params.get('videopassword', None)
         if password is None:
-            raise ExtractorError(u'This video is protected by a password, use the --video-password option')
-        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+            raise ExtractorError('This video is protected by a password, use the --video-password option')
+        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
         data = compat_urllib_parse.urlencode({'password': password,
                                               'token': token})
         # I didn't manage to use the password with https
         data = compat_urllib_parse.urlencode({'password': password,
                                               'token': token})
         # I didn't manage to use the password with https
@@ -118,8 +121,21 @@ class VimeoIE(InfoExtractor):
         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         password_request.add_header('Cookie', 'xsrft=%s' % token)
         self._download_webpage(password_request, video_id,
         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         password_request.add_header('Cookie', 'xsrft=%s' % token)
         self._download_webpage(password_request, video_id,
-                               u'Verifying the password',
-                               u'Wrong password')
+                               'Verifying the password',
+                               'Wrong password')
+
+    def _verify_player_video_password(self, url, video_id):
+        password = self._downloader.params.get('videopassword', None)
+        if password is None:
+            raise ExtractorError('This video is protected by a password, use the --video-password option')
+        data = compat_urllib_parse.urlencode({'password': password})
+        pass_url = url + '/check-password'
+        password_request = compat_urllib_request.Request(pass_url, data)
+        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        return self._download_json(
+            password_request, video_id,
+            'Verifying the password',
+            'Wrong password')
 
     def _real_initialize(self):
         self._login()
 
     def _real_initialize(self):
         self._login()
@@ -133,9 +149,6 @@ class VimeoIE(InfoExtractor):
 
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
 
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
         video_id = mobj.group('id')
         if mobj.group('pro') or mobj.group('player'):
             url = 'http://player.vimeo.com/video/' + video_id
         video_id = mobj.group('id')
         if mobj.group('pro') or mobj.group('player'):
             url = 'http://player.vimeo.com/video/' + video_id
@@ -155,7 +168,7 @@ class VimeoIE(InfoExtractor):
         try:
             try:
                 config_url = self._html_search_regex(
         try:
             try:
                 config_url = self._html_search_regex(
-                    r' data-config-url="(.+?)"', webpage, u'config URL')
+                    r' data-config-url="(.+?)"', webpage, 'config URL')
                 config_json = self._download_webpage(config_url, video_id)
                 config = json.loads(config_json)
             except RegexNotFoundError:
                 config_json = self._download_webpage(config_url, video_id)
                 config = json.loads(config_json)
             except RegexNotFoundError:
@@ -166,19 +179,22 @@ class VimeoIE(InfoExtractor):
                     config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                     config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
-                config = self._search_regex(config_re, webpage, u'info section',
+                config = self._search_regex(config_re, webpage, 'info section',
                     flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                     flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
-                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
+                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
 
             if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                 self._verify_video_password(url, video_id, webpage)
                 return self._real_extract(url)
             else:
 
             if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                 self._verify_video_password(url, video_id, webpage)
                 return self._real_extract(url)
             else:
-                raise ExtractorError(u'Unable to extract info section',
+                raise ExtractorError('Unable to extract info section',
                                      cause=e)
                                      cause=e)
+        else:
+            if config.get('view') == 4:
+                config = self._verify_player_video_password(url, video_id)
 
         # Extract title
         video_title = config["video"]["title"]
 
         # Extract title
         video_title = config["video"]["title"]
@@ -212,9 +228,9 @@ class VimeoIE(InfoExtractor):
             video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
 
         try:
             video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
 
         try:
-            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, u'view count'))
-            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, u'like count'))
-            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, u'comment count'))
+            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
+            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
+            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
         except RegexNotFoundError:
             # This info is only available in vimeo.com/{id} urls
             view_count = None
         except RegexNotFoundError:
             # This info is only available in vimeo.com/{id} urls
             view_count = None
@@ -255,7 +271,7 @@ class VimeoIE(InfoExtractor):
         for key in ('other', 'sd', 'hd'):
             formats += files[key]
         if len(formats) == 0:
         for key in ('other', 'sd', 'hd'):
             formats += files[key]
         if len(formats) == 0:
-            raise ExtractorError(u'No known codec found')
+            raise ExtractorError('No known codec found')
 
         return {
             'id':       video_id,
 
         return {
             'id':       video_id,
@@ -274,7 +290,7 @@ class VimeoIE(InfoExtractor):
 
 
 class VimeoChannelIE(InfoExtractor):
 
 
 class VimeoChannelIE(InfoExtractor):
-    IE_NAME = u'vimeo:channel'
+    IE_NAME = 'vimeo:channel'
     _VALID_URL = r'(?:https?://)?vimeo.\com/channels/(?P<id>[^/]+)'
     _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
     _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
     _VALID_URL = r'(?:https?://)?vimeo.\com/channels/(?P<id>[^/]+)'
     _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
     _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
@@ -283,14 +299,14 @@ class VimeoChannelIE(InfoExtractor):
         return '%s/videos/page:%d/' % (base_url, pagenum)
 
     def _extract_list_title(self, webpage):
         return '%s/videos/page:%d/' % (base_url, pagenum)
 
     def _extract_list_title(self, webpage):
-        return self._html_search_regex(self._TITLE_RE, webpage, u'list title')
+        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')
 
     def _extract_videos(self, list_id, base_url):
         video_ids = []
         for pagenum in itertools.count(1):
             webpage = self._download_webpage(
                 self._page_url(base_url, pagenum) ,list_id,
 
     def _extract_videos(self, list_id, base_url):
         video_ids = []
         for pagenum in itertools.count(1):
             webpage = self._download_webpage(
                 self._page_url(base_url, pagenum) ,list_id,
-                u'Downloading page %s' % pagenum)
+                'Downloading page %s' % pagenum)
             video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
             if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                 break
             video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
             if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                 break
@@ -310,8 +326,8 @@ class VimeoChannelIE(InfoExtractor):
 
 
 class VimeoUserIE(VimeoChannelIE):
 
 
 class VimeoUserIE(VimeoChannelIE):
-    IE_NAME = u'vimeo:user'
-    _VALID_URL = r'(?:https?://)?vimeo.\com/(?P<name>[^/]+)'
+    IE_NAME = 'vimeo:user'
+    _VALID_URL = r'(?:https?://)?vimeo.\com/(?P<name>[^/]+)(?:/videos|[#?]|$)'
     _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
 
     @classmethod
     _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
 
     @classmethod
@@ -327,7 +343,7 @@ class VimeoUserIE(VimeoChannelIE):
 
 
 class VimeoAlbumIE(VimeoChannelIE):
 
 
 class VimeoAlbumIE(VimeoChannelIE):
-    IE_NAME = u'vimeo:album'
+    IE_NAME = 'vimeo:album'
     _VALID_URL = r'(?:https?://)?vimeo.\com/album/(?P<id>\d+)'
     _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
 
     _VALID_URL = r'(?:https?://)?vimeo.\com/album/(?P<id>\d+)'
     _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
 
@@ -336,12 +352,12 @@ class VimeoAlbumIE(VimeoChannelIE):
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        album_id =  mobj.group('id')
+        album_id = mobj.group('id')
         return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)
 
 
 class VimeoGroupsIE(VimeoAlbumIE):
         return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)
 
 
 class VimeoGroupsIE(VimeoAlbumIE):
-    IE_NAME = u'vimeo:group'
+    IE_NAME = 'vimeo:group'
     _VALID_URL = r'(?:https?://)?vimeo.\com/groups/(?P<name>[^/]+)'
 
     def _extract_list_title(self, webpage):
     _VALID_URL = r'(?:https?://)?vimeo.\com/groups/(?P<name>[^/]+)'
 
     def _extract_list_title(self, webpage):
@@ -351,3 +367,24 @@ class VimeoGroupsIE(VimeoAlbumIE):
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)
         mobj = re.match(self._VALID_URL, url)
         name = mobj.group('name')
         return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)
+
+
+class VimeoReviewIE(InfoExtractor):
+    IE_NAME = 'vimeo:review'
+    IE_DESC = 'Review pages on vimeo'
+    _VALID_URL = r'(?:https?://)?vimeo.\com/[^/]+/review/(?P<id>[^/]+)'
+    _TEST = {
+        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
+        'file': '75524534.mp4',
+        'md5': 'c507a72f780cacc12b2248bb4006d253',
+        'info_dict': {
+            'title': "DICK HARDWICK 'Comedian'",
+            'uploader': 'Richard Hardwick',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        player_url = 'https://player.vimeo.com/player/' + video_id
+        return self.url_result(player_url, 'Vimeo', video_id)
index e1748c2613bbcf94bb36b706ce7ddfdde6b2b86c..bc31c2e64f22999adf575e60d59bde3d903bb9cc 100644 (file)
@@ -44,8 +44,10 @@ class WistiaIE(InfoExtractor):
                 'height': a['height'],
                 'filesize': a['size'],
                 'ext': a['ext'],
                 'height': a['height'],
                 'filesize': a['size'],
                 'ext': a['ext'],
+                'preference': 1 if atype == 'original' else None,
             })
             })
-        formats.sort(key=lambda a: a['filesize'])
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index 5c9c361b9ee5658d307a7759040b855a3e794cf1..e17a39782bd2e674855dff8a5ec3112bd40158c6 100644 (file)
@@ -6,8 +6,8 @@ from .common import InfoExtractor, SearchInfoExtractor
 from ..utils import (
     compat_urllib_parse,
     compat_urlparse,
 from ..utils import (
     compat_urllib_parse,
     compat_urlparse,
-    determine_ext,
     clean_html,
     clean_html,
+    int_or_none,
 )
 
 
 )
 
 
@@ -68,9 +68,9 @@ class YahooIE(InfoExtractor):
         formats = []
         for s in info['streams']:
             format_info = {
         formats = []
         for s in info['streams']:
             format_info = {
-                'width': s.get('width'),
-                'height': s.get('height'),
-                'bitrate': s.get('bitrate'),
+                'width': int_or_none(s.get('width')),
+                'height': int_or_none(s.get('height')),
+                'tbr': int_or_none(s.get('bitrate')),
             }
 
             host = s['host']
             }
 
             host = s['host']
@@ -84,10 +84,10 @@ class YahooIE(InfoExtractor):
             else:
                 format_url = compat_urlparse.urljoin(host, path)
                 format_info['url'] = format_url
             else:
                 format_url = compat_urlparse.urljoin(host, path)
                 format_info['url'] = format_url
-                format_info['ext'] = determine_ext(format_url)
                 
             formats.append(format_info)
                 
             formats.append(format_info)
-        formats = sorted(formats, key=lambda f:(f['height'], f['width']))
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
index bd0f2cae0298dec0d78f812153976ec6a8434bb0..77ad423c44b38af655fc14a8918dfbcf677ca936 100644 (file)
@@ -1,5 +1,4 @@
 import json
 import json
-import os
 import re
 import sys
 
 import re
 import sys
 
@@ -16,6 +15,7 @@ from ..aes import (
     aes_decrypt_text
 )
 
     aes_decrypt_text
 )
 
+
 class YouPornIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
     _TEST = {
 class YouPornIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
     _TEST = {
@@ -23,9 +23,9 @@ class YouPornIE(InfoExtractor):
         u'file': u'505835.mp4',
         u'md5': u'71ec5fcfddacf80f495efa8b6a8d9a89',
         u'info_dict': {
         u'file': u'505835.mp4',
         u'md5': u'71ec5fcfddacf80f495efa8b6a8d9a89',
         u'info_dict': {
-            u"upload_date": u"20101221", 
-            u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?", 
-            u"uploader": u"Ask Dan And Jennifer", 
+            u"upload_date": u"20101221",
+            u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?",
+            u"uploader": u"Ask Dan And Jennifer",
             u"title": u"Sex Ed: Is It Safe To Masturbate Daily?",
             u"age_limit": 18,
         }
             u"title": u"Sex Ed: Is It Safe To Masturbate Daily?",
             u"age_limit": 18,
         }
@@ -71,38 +71,36 @@ class YouPornIE(InfoExtractor):
             link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
             links.append(link)
         
             link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
             links.append(link)
         
-        if not links:
-            raise ExtractorError(u'ERROR: no known formats available for video')
-
         formats = []
         for link in links:
         formats = []
         for link in links:
-
             # A link looks like this:
             # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
             # A path looks like this:
             # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
             video_url = unescapeHTML(link)
             path = compat_urllib_parse_urlparse(video_url).path
             # A link looks like this:
             # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
             # A path looks like this:
             # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
             video_url = unescapeHTML(link)
             path = compat_urllib_parse_urlparse(video_url).path
-            extension = os.path.splitext(path)[1][1:]
-            format = path.split('/')[4].split('_')[:2]
+            format_parts = path.split('/')[4].split('_')[:2]
 
 
-            # size = format[0]
-            # bitrate = format[1]
-            format = "-".join(format)
-            # title = u'%s-%s-%s' % (video_title, size, bitrate)
+            dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0]
+
+            resolution = format_parts[0]
+            height = int(resolution[:-len('p')])
+            bitrate = int(format_parts[1][:-len('k')])
+            format = u'-'.join(format_parts) + u'-' + dn
 
             formats.append({
                 'url': video_url,
 
             formats.append({
                 'url': video_url,
-                'ext': extension,
                 'format': format,
                 'format_id': format,
                 'format': format,
                 'format_id': format,
+                'height': height,
+                'tbr': bitrate,
+                'resolution': resolution,
             })
 
             })
 
-        # Sort and remove doubles
-        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))))
-        for i in range(len(formats)-1,0,-1):
-            if formats[i]['format_id'] == formats[i-1]['format_id']:
-                del formats[i]
+        self._sort_formats(formats)
+
+        if not formats:
+            raise ExtractorError(u'ERROR: no known formats available for video')
         
         return {
             'id': video_id,
         
         return {
             'id': video_id,
index a68576547e85f344d7ccaa78092fc0146b2e935e..bf3fde61020490d82e095dc75bf21f04b1219bce 100644 (file)
@@ -131,6 +131,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                      (
                          (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                      (
                          (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
                          (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
+                            (?:www\.)?deturl\.com/www\.youtube\.com/|
+                            (?:www\.)?pwnyoutube\.com|
                             tube\.majestyc\.net/|
                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                             tube\.majestyc\.net/|
                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
@@ -150,168 +152,72 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                      (?(1).+)?                                                # if we found the ID, everything can follow
                      $"""
     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
                      (?(1).+)?                                                # if we found the ID, everything can follow
                      $"""
     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
-    # Listed in order of quality
-    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '36', '17', '13',
-                          # Apple HTTP Live Streaming
-                          '96', '95', '94', '93', '92', '132', '151',
-                          # 3D
-                          '85', '84', '102', '83', '101', '82', '100',
-                          # Dash video
-                          '138', '137', '248', '136', '247', '135', '246',
-                          '245', '244', '134', '243', '133', '242', '160',
-                          # Dash audio
-                          '141', '172', '140', '171', '139',
-                          ]
-    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '36', '17', '13',
-                                      # Apple HTTP Live Streaming
-                                      '96', '95', '94', '93', '92', '132', '151',
-                                      # 3D
-                                      '85', '102', '84', '101', '83', '100', '82',
-                                      # Dash video
-                                      '138', '248', '137', '247', '136', '246', '245',
-                                      '244', '135', '243', '134', '242', '133', '160',
-                                      # Dash audio
-                                      '172', '141', '171', '140', '139',
-                                      ]
-    _video_formats_map = {
-        'flv': ['35', '34', '6', '5'],
-        '3gp': ['36', '17', '13'],
-        'mp4': ['38', '37', '22', '18'],
-        'webm': ['46', '45', '44', '43'],
-    }
-    _video_extensions = {
-        '13': '3gp',
-        '17': '3gp',
-        '18': 'mp4',
-        '22': 'mp4',
-        '36': '3gp',
-        '37': 'mp4',
-        '38': 'mp4',
-        '43': 'webm',
-        '44': 'webm',
-        '45': 'webm',
-        '46': 'webm',
+    _formats = {
+        '5': {'ext': 'flv', 'width': 400, 'height': 240},
+        '6': {'ext': 'flv', 'width': 450, 'height': 270},
+        '13': {'ext': '3gp'},
+        '17': {'ext': '3gp', 'width': 176, 'height': 144},
+        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
+        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
+        '34': {'ext': 'flv', 'width': 640, 'height': 360},
+        '35': {'ext': 'flv', 'width': 854, 'height': 480},
+        '36': {'ext': '3gp', 'width': 320, 'height': 240},
+        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
+        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
+        '43': {'ext': 'webm', 'width': 640, 'height': 360},
+        '44': {'ext': 'webm', 'width': 854, 'height': 480},
+        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
+        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
+
 
         # 3d videos
 
         # 3d videos
-        '82': 'mp4',
-        '83': 'mp4',
-        '84': 'mp4',
-        '85': 'mp4',
-        '100': 'webm',
-        '101': 'webm',
-        '102': 'webm',
+        '82': {'ext': 'mp4', 'height': 360, 'resolution': '360p', 'format_note': '3D', 'preference': -20},
+        '83': {'ext': 'mp4', 'height': 480, 'resolution': '480p', 'format_note': '3D', 'preference': -20},
+        '84': {'ext': 'mp4', 'height': 720, 'resolution': '720p', 'format_note': '3D', 'preference': -20},
+        '85': {'ext': 'mp4', 'height': 1080, 'resolution': '1080p', 'format_note': '3D', 'preference': -20},
+        '100': {'ext': 'webm', 'height': 360, 'resolution': '360p', 'format_note': '3D', 'preference': -20},
+        '101': {'ext': 'webm', 'height': 480, 'resolution': '480p', 'format_note': '3D', 'preference': -20},
+        '102': {'ext': 'webm', 'height': 720, 'resolution': '720p', 'format_note': '3D', 'preference': -20},
 
         # Apple HTTP Live Streaming
 
         # Apple HTTP Live Streaming
-        '92': 'mp4',
-        '93': 'mp4',
-        '94': 'mp4',
-        '95': 'mp4',
-        '96': 'mp4',
-        '132': 'mp4',
-        '151': 'mp4',
-
-        # Dash mp4
-        '133': 'mp4',
-        '134': 'mp4',
-        '135': 'mp4',
-        '136': 'mp4',
-        '137': 'mp4',
-        '138': 'mp4',
-        '160': 'mp4',
+        '92': {'ext': 'mp4', 'height': 240, 'resolution': '240p', 'format_note': 'HLS', 'preference': -10},
+        '93': {'ext': 'mp4', 'height': 360, 'resolution': '360p', 'format_note': 'HLS', 'preference': -10},
+        '94': {'ext': 'mp4', 'height': 480, 'resolution': '480p', 'format_note': 'HLS', 'preference': -10},
+        '95': {'ext': 'mp4', 'height': 720, 'resolution': '720p', 'format_note': 'HLS', 'preference': -10},
+        '96': {'ext': 'mp4', 'height': 1080, 'resolution': '1080p', 'format_note': 'HLS', 'preference': -10},
+        '132': {'ext': 'mp4', 'height': 240, 'resolution': '240p', 'format_note': 'HLS', 'preference': -10},
+        '151': {'ext': 'mp4', 'height': 72, 'resolution': '72p', 'format_note': 'HLS', 'preference': -10},
+
+        # DASH mp4 video
+        '133': {'ext': 'mp4', 'height': 240, 'resolution': '240p', 'format_note': 'DASH video', 'preference': -40},
+        '134': {'ext': 'mp4', 'height': 360, 'resolution': '360p', 'format_note': 'DASH video', 'preference': -40},
+        '135': {'ext': 'mp4', 'height': 480, 'resolution': '480p', 'format_note': 'DASH video', 'preference': -40},
+        '136': {'ext': 'mp4', 'height': 720, 'resolution': '720p', 'format_note': 'DASH video', 'preference': -40},
+        '137': {'ext': 'mp4', 'height': 1080, 'resolution': '1080p', 'format_note': 'DASH video', 'preference': -40},
+        '138': {'ext': 'mp4', 'height': 1081, 'resolution': '>1080p', 'format_note': 'DASH video', 'preference': -40},
+        '160': {'ext': 'mp4', 'height': 192, 'resolution': '192p', 'format_note': 'DASH video', 'preference': -40},
+        '264': {'ext': 'mp4', 'height': 1080, 'resolution': '1080p', 'format_note': 'DASH video', 'preference': -40},
 
         # Dash mp4 audio
 
         # Dash mp4 audio
-        '139': 'm4a',
-        '140': 'm4a',
-        '141': 'm4a',
+        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
+        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
+        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},
 
         # Dash webm
 
         # Dash webm
-        '171': 'webm',
-        '172': 'webm',
-        '242': 'webm',
-        '243': 'webm',
-        '244': 'webm',
-        '245': 'webm',
-        '246': 'webm',
-        '247': 'webm',
-        '248': 'webm',
-    }
-    _video_dimensions = {
-        '5': '400x240',
-        '6': '???',
-        '13': '???',
-        '17': '176x144',
-        '18': '640x360',
-        '22': '1280x720',
-        '34': '640x360',
-        '35': '854x480',
-        '36': '320x240',
-        '37': '1920x1080',
-        '38': '4096x3072',
-        '43': '640x360',
-        '44': '854x480',
-        '45': '1280x720',
-        '46': '1920x1080',
-        '82': '360p',
-        '83': '480p',
-        '84': '720p',
-        '85': '1080p',
-        '92': '240p',
-        '93': '360p',
-        '94': '480p',
-        '95': '720p',
-        '96': '1080p',
-        '100': '360p',
-        '101': '480p',
-        '102': '720p',
-        '132': '240p',
-        '151': '72p',
-        '133': '240p',
-        '134': '360p',
-        '135': '480p',
-        '136': '720p',
-        '137': '1080p',
-        '138': '>1080p',
-        '139': '48k',
-        '140': '128k',
-        '141': '256k',
-        '160': '192p',
-        '171': '128k',
-        '172': '256k',
-        '242': '240p',
-        '243': '360p',
-        '244': '480p',
-        '245': '480p',
-        '246': '480p',
-        '247': '720p',
-        '248': '1080p',
-    }
-    _special_itags = {
-        '82': '3D',
-        '83': '3D',
-        '84': '3D',
-        '85': '3D',
-        '100': '3D',
-        '101': '3D',
-        '102': '3D',
-        '133': 'DASH Video',
-        '134': 'DASH Video',
-        '135': 'DASH Video',
-        '136': 'DASH Video',
-        '137': 'DASH Video',
-        '138': 'DASH Video',
-        '139': 'DASH Audio',
-        '140': 'DASH Audio',
-        '141': 'DASH Audio',
-        '160': 'DASH Video',
-        '171': 'DASH Audio',
-        '172': 'DASH Audio',
-        '242': 'DASH Video',
-        '243': 'DASH Video',
-        '244': 'DASH Video',
-        '245': 'DASH Video',
-        '246': 'DASH Video',
-        '247': 'DASH Video',
-        '248': 'DASH Video',
+        '242': {'ext': 'webm', 'height': 240, 'resolution': '240p', 'format_note': 'DASH webm', 'preference': -40},
+        '243': {'ext': 'webm', 'height': 360, 'resolution': '360p', 'format_note': 'DASH webm', 'preference': -40},
+        '244': {'ext': 'webm', 'height': 480, 'resolution': '480p', 'format_note': 'DASH webm', 'preference': -40},
+        '245': {'ext': 'webm', 'height': 480, 'resolution': '480p', 'format_note': 'DASH webm', 'preference': -40},
+        '246': {'ext': 'webm', 'height': 480, 'resolution': '480p', 'format_note': 'DASH webm', 'preference': -40},
+        '247': {'ext': 'webm', 'height': 720, 'resolution': '720p', 'format_note': 'DASH webm', 'preference': -40},
+        '248': {'ext': 'webm', 'height': 1080, 'resolution': '1080p', 'format_note': 'DASH webm', 'preference': -40},
+
+        # Dash webm audio
+        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 48, 'preference': -50},
+        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 256, 'preference': -50},
+
+        # RTMP (unnamed)
+        '_rtmp': {'protocol': 'rtmp'},
     }
 
     IE_NAME = u'youtube'
     }
 
     IE_NAME = u'youtube'
@@ -1097,7 +1003,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'lang': lang,
                 'v': video_id,
                 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
                 'lang': lang,
                 'v': video_id,
                 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
-                'name': l[0].encode('utf-8'),
+                'name': unescapeHTML(l[0]).encode('utf-8'),
             })
             url = u'http://www.youtube.com/api/timedtext?' + params
             sub_lang_list[lang] = url
             })
             url = u'http://www.youtube.com/api/timedtext?' + params
             sub_lang_list[lang] = url
@@ -1153,13 +1059,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             self._downloader.report_warning(err_msg)
             return {}
 
             self._downloader.report_warning(err_msg)
             return {}
 
-    def _print_formats(self, formats):
-        print('Available formats:')
-        for x in formats:
-            print('%s\t:\t%s\t[%s]%s' %(x, self._video_extensions.get(x, 'flv'),
-                                        self._video_dimensions.get(x, '???'),
-                                        ' ('+self._special_itags[x]+')' if x in self._special_itags else ''))
-
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
@@ -1172,48 +1071,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         Transform a dictionary in the format {itag:url} to a list of (itag, url)
         with the requested formats.
         """
         Transform a dictionary in the format {itag:url} to a list of (itag, url)
         with the requested formats.
         """
-        req_format = self._downloader.params.get('format', None)
-        format_limit = self._downloader.params.get('format_limit', None)
-        available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
-        if format_limit is not None and format_limit in available_formats:
-            format_list = available_formats[available_formats.index(format_limit):]
-        else:
-            format_list = available_formats
-        existing_formats = [x for x in format_list if x in url_map]
+        existing_formats = [x for x in self._formats if x in url_map]
         if len(existing_formats) == 0:
             raise ExtractorError(u'no known formats available for video')
         if len(existing_formats) == 0:
             raise ExtractorError(u'no known formats available for video')
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(existing_formats)
-            return
-        if req_format is None or req_format == 'best':
-            video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-        elif req_format == 'worst':
-            video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
-        elif req_format in ('-1', 'all'):
-            video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-        else:
-            # Specific formats. We pick the first in a slash-delimeted sequence.
-            # Format can be specified as itag or 'mp4' or 'flv' etc. We pick the highest quality
-            # available in the specified format. For example,
-            # if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
-            # if '1/mp4/3/4' is requested and '1' and '5' (is a mp4) are available, we pick '1'.
-            # if '1/mp4/3/4' is requested and '4' and '5' (is a mp4) are available, we pick '5'.
-            req_formats = req_format.split('/')
-            video_url_list = None
-            for rf in req_formats:
-                if rf in url_map:
-                    video_url_list = [(rf, url_map[rf])]
-                    break
-                if rf in self._video_formats_map:
-                    for srf in self._video_formats_map[rf]:
-                        if srf in url_map:
-                            video_url_list = [(srf, url_map[srf])]
-                            break
-                    else:
-                        continue
-                    break
-            if video_url_list is None:
-                raise ExtractorError(u'requested format not available')
+        video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+        video_url_list.reverse() # order worst to best
         return video_url_list
 
     def _extract_from_m3u8(self, manifest_url, video_id):
         return video_url_list
 
     def _extract_from_m3u8(self, manifest_url, video_id):
@@ -1416,7 +1278,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
 
         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
             self.report_rtmp_download()
 
         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
             self.report_rtmp_download()
-            video_url_list = [(None, video_info['conn'][0])]
+            video_url_list = [('_rtmp', video_info['conn'][0])]
         elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
             encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
             if 'rtmpe%3Dyes' in encoded_url_map:
         elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
             encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
             if 'rtmpe%3Dyes' in encoded_url_map:
@@ -1462,50 +1324,43 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                         url += '&ratebypass=yes'
                     url_map[url_data['itag'][0]] = url
             video_url_list = self._get_video_url_list(url_map)
                         url += '&ratebypass=yes'
                     url_map[url_data['itag'][0]] = url
             video_url_list = self._get_video_url_list(url_map)
-            if not video_url_list:
-                return
         elif video_info.get('hlsvp'):
             manifest_url = video_info['hlsvp'][0]
             url_map = self._extract_from_m3u8(manifest_url, video_id)
             video_url_list = self._get_video_url_list(url_map)
         elif video_info.get('hlsvp'):
             manifest_url = video_info['hlsvp'][0]
             url_map = self._extract_from_m3u8(manifest_url, video_id)
             video_url_list = self._get_video_url_list(url_map)
-            if not video_url_list:
-                return
-
         else:
             raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
         else:
             raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
-        results = []
+        formats = []
         for itag, video_real_url in video_url_list:
         for itag, video_real_url in video_url_list:
-            # Extension
-            video_extension = self._video_extensions.get(itag, 'flv')
-
-            video_format = '{0} - {1}{2}'.format(itag if itag else video_extension,
-                                              self._video_dimensions.get(itag, '???'),
-                                              ' ('+self._special_itags[itag]+')' if itag in self._special_itags else '')
-
-            results.append({
-                'id':       video_id,
-                'url':      video_real_url,
-                'uploader': video_uploader,
-                'uploader_id': video_uploader_id,
-                'upload_date':  upload_date,
-                'title':    video_title,
-                'ext':      video_extension,
-                'format':   video_format,
+            dct = {
                 'format_id': itag,
                 'format_id': itag,
-                'thumbnail':    video_thumbnail,
-                'description':  video_description,
-                'player_url':   player_url,
-                'subtitles':    video_subtitles,
-                'duration':     video_duration,
-                'age_limit':    18 if age_gate else 0,
-                'annotations':  video_annotations,
-                'webpage_url': 'https://www.youtube.com/watch?v=%s' % video_id,
-                'view_count': view_count,
-                'like_count': like_count,
-                'dislike_count': dislike_count,
-            })
-        return results
+                'url': video_real_url,
+                'player_url': player_url,
+            }
+            dct.update(self._formats[itag])
+            formats.append(dct)
+
+        self._sort_formats(formats)
+
+        return {
+            'id':           video_id,
+            'uploader':     video_uploader,
+            'uploader_id':  video_uploader_id,
+            'upload_date':  upload_date,
+            'title':        video_title,
+            'thumbnail':    video_thumbnail,
+            'description':  video_description,
+            'subtitles':    video_subtitles,
+            'duration':     video_duration,
+            'age_limit':    18 if age_gate else 0,
+            'annotations':  video_annotations,
+            'webpage_url': 'https://www.youtube.com/watch?v=%s' % video_id,
+            'view_count':   view_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'formats':      formats,
+        }
 
 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     IE_DESC = u'YouTube.com playlists'
 
 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     IE_DESC = u'YouTube.com playlists'
@@ -1909,6 +1764,6 @@ class YoutubeTruncatedURLIE(InfoExtractor):
             u'Did you forget to quote the URL? Remember that & is a meta '
             u'character in most shells, so you want to put the URL in quotes, '
             u'like  youtube-dl '
             u'Did you forget to quote the URL? Remember that & is a meta '
             u'character in most shells, so you want to put the URL in quotes, '
             u'like  youtube-dl '
-            u'\'http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc\''
-            u' (or simply  youtube-dl BaW_jenozKc  ).',
+            u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
+            u' or simply  youtube-dl BaW_jenozKc  .',
             expected=True)
             expected=True)
index 35ece354a6ecdf7ba5705184d4ceff22d57eb3d4..829f002cf02f9c908a5057ab3c6b20f520e2f2ea 100644 (file)
@@ -1,10 +1,10 @@
 # coding: utf-8
 
 # coding: utf-8
 
-import operator
 import re
 
 from .common import InfoExtractor
 from ..utils import (
 import re
 
 from .common import InfoExtractor
 from ..utils import (
+    int_or_none,
     unified_strdate,
 )
 
     unified_strdate,
 )
 
@@ -67,29 +67,13 @@ class ZDFIE(InfoExtractor):
             ''', format_id)
 
             ext = format_m.group('container')
             ''', format_id)
 
             ext = format_m.group('container')
-            is_supported = ext != 'f4f'
-
-            PROTO_ORDER = ['http', 'rtmp', 'rtsp']
-            try:
-                proto_pref = -PROTO_ORDER.index(format_m.group('proto'))
-            except ValueError:
-                proto_pref = -999
+            proto = format_m.group('proto').lower()
 
             quality = fnode.find('./quality').text
 
             quality = fnode.find('./quality').text
-            QUALITY_ORDER = ['veryhigh', '300', 'high', 'med', 'low']
-            try:
-                quality_pref = -QUALITY_ORDER.index(quality)
-            except ValueError:
-                quality_pref = -999
-
             abr = int(fnode.find('./audioBitrate').text) // 1000
             vbr = int(fnode.find('./videoBitrate').text) // 1000
             abr = int(fnode.find('./audioBitrate').text) // 1000
             vbr = int(fnode.find('./videoBitrate').text) // 1000
-            pref = (is_available, is_supported,
-                    proto_pref, quality_pref, vbr, abr)
 
             format_note = u''
 
             format_note = u''
-            if not is_supported:
-                format_note += u'(unsupported)'
             if not format_note:
                 format_note = None
 
             if not format_note:
                 format_note = None
 
@@ -101,18 +85,20 @@ class ZDFIE(InfoExtractor):
                 'vcodec': format_m.group('vcodec'),
                 'abr': abr,
                 'vbr': vbr,
                 'vcodec': format_m.group('vcodec'),
                 'abr': abr,
                 'vbr': vbr,
-                'width': int(fnode.find('./width').text),
-                'height': int(fnode.find('./height').text),
-                'filesize': int(fnode.find('./filesize').text),
+                'width': int_or_none(fnode.find('./width').text),
+                'height': int_or_none(fnode.find('./height').text),
+                'filesize': int_or_none(fnode.find('./filesize').text),
                 'format_note': format_note,
                 'format_note': format_note,
-                '_pref': pref,
+                'protocol': proto,
                 '_available': is_available,
             }
 
         format_nodes = doc.findall('.//formitaeten/formitaet')
                 '_available': is_available,
             }
 
         format_nodes = doc.findall('.//formitaeten/formitaet')
-        formats = sorted(filter(lambda f: f['_available'],
-                                map(xml_to_format, format_nodes)),
-                         key=operator.itemgetter('_pref'))
+        formats = list(filter(
+            lambda f: f['_available'],
+            map(xml_to_format, format_nodes)))
+
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
 
         return {
             'id': video_id,
diff --git a/youtube_dl/postprocessor/__init__.py b/youtube_dl/postprocessor/__init__.py
new file mode 100644 (file)
index 0000000..7f19f71
--- /dev/null
@@ -0,0 +1,18 @@
+
+from .ffmpeg import (
+    FFmpegMergerPP,
+    FFmpegMetadataPP,
+    FFmpegVideoConvertor,
+    FFmpegExtractAudioPP,
+    FFmpegEmbedSubtitlePP,
+)
+from .xattrpp import XAttrMetadataPP
+
+__all__ = [
+    'FFmpegMergerPP',
+    'FFmpegMetadataPP',
+    'FFmpegVideoConvertor',
+    'FFmpegExtractAudioPP',
+    'FFmpegEmbedSubtitlePP',
+    'XAttrMetadataPP',
+]
diff --git a/youtube_dl/postprocessor/common.py b/youtube_dl/postprocessor/common.py
new file mode 100644 (file)
index 0000000..788f94d
--- /dev/null
@@ -0,0 +1,49 @@
+from ..utils import PostProcessingError
+
+
+class PostProcessor(object):
+    """Post Processor class.
+
+    PostProcessor objects can be added to downloaders with their
+    add_post_processor() method. When the downloader has finished a
+    successful download, it will take its internal chain of PostProcessors
+    and start calling the run() method on each one of them, first with
+    an initial argument and then with the returned value of the previous
+    PostProcessor.
+
+    The chain will be stopped if one of them ever returns None or the end
+    of the chain is reached.
+
+    PostProcessor objects follow a "mutual registration" process similar
+    to InfoExtractor objects.
+    """
+
+    _downloader = None
+
+    def __init__(self, downloader=None):
+        self._downloader = downloader
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this PP."""
+        self._downloader = downloader
+
+    def run(self, information):
+        """Run the PostProcessor.
+
+        The "information" argument is a dictionary like the ones
+        composed by InfoExtractors. The only difference is that this
+        one has an extra field called "filepath" that points to the
+        downloaded file.
+
+        This method returns a tuple, the first element of which describes
+        whether the original file should be kept (i.e. not deleted - None for
+        no preference), and the second of which is the updated information.
+
+        In addition, this method may raise a PostProcessingError
+        exception if post processing fails.
+        """
+        return None, information  # by default, keep file and do nothing
+
+
+class AudioConversionError(PostProcessingError):
+    pass
similarity index 80%
rename from youtube_dl/PostProcessor.py
rename to youtube_dl/postprocessor/ffmpeg.py
index 69aedf87a44c72060e2af135cd95f6f820e9ab0c..8c19ed7fa1fb833103c519754499a44e9bf3a76b 100644 (file)
@@ -4,64 +4,23 @@ import sys
 import time
 
 
 import time
 
 
-from .utils import (
+from .common import AudioConversionError, PostProcessor
+
+from ..utils import (
+    check_executable,
     compat_subprocess_get_DEVNULL,
     encodeFilename,
     PostProcessingError,
     compat_subprocess_get_DEVNULL,
     encodeFilename,
     PostProcessingError,
+    prepend_extension,
     shell_quote,
     subtitles_filename,
 )
 
 
     shell_quote,
     subtitles_filename,
 )
 
 
-class PostProcessor(object):
-    """Post Processor class.
-
-    PostProcessor objects can be added to downloaders with their
-    add_post_processor() method. When the downloader has finished a
-    successful download, it will take its internal chain of PostProcessors
-    and start calling the run() method on each one of them, first with
-    an initial argument and then with the returned value of the previous
-    PostProcessor.
-
-    The chain will be stopped if one of them ever returns None or the end
-    of the chain is reached.
-
-    PostProcessor objects follow a "mutual registration" process similar
-    to InfoExtractor objects.
-    """
-
-    _downloader = None
-
-    def __init__(self, downloader=None):
-        self._downloader = downloader
-
-    def set_downloader(self, downloader):
-        """Sets the downloader for this PP."""
-        self._downloader = downloader
-
-    def run(self, information):
-        """Run the PostProcessor.
-
-        The "information" argument is a dictionary like the ones
-        composed by InfoExtractors. The only difference is that this
-        one has an extra field called "filepath" that points to the
-        downloaded file.
-
-        This method returns a tuple, the first element of which describes
-        whether the original file should be kept (i.e. not deleted - None for
-        no preference), and the second of which is the updated information.
-
-        In addition, this method may raise a PostProcessingError
-        exception if post processing fails.
-        """
-        return None, information # by default, keep file and do nothing
 
 class FFmpegPostProcessorError(PostProcessingError):
     pass
 
 
 class FFmpegPostProcessorError(PostProcessingError):
     pass
 
-class AudioConversionError(PostProcessingError):
-    pass
-
 class FFmpegPostProcessor(PostProcessor):
     def __init__(self,downloader=None):
         PostProcessor.__init__(self, downloader)
 class FFmpegPostProcessor(PostProcessor):
     def __init__(self,downloader=None):
         PostProcessor.__init__(self, downloader)
@@ -69,25 +28,28 @@ class FFmpegPostProcessor(PostProcessor):
 
     @staticmethod
     def detect_executables():
 
     @staticmethod
     def detect_executables():
-        def executable(exe):
-            try:
-                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-            except OSError:
-                return False
-            return exe
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        return dict((program, executable(program)) for program in programs)
+        return dict((program, check_executable(program, ['-version'])) for program in programs)
+
+    def _get_executable(self):
+        if self._downloader.params.get('prefer_ffmpeg', False):
+            return self._exes['ffmpeg'] or self._exes['avconv']
+        else:
+            return self._exes['avconv'] or self._exes['ffmpeg']
+
+    def _uses_avconv(self):
+        return self._get_executable() == self._exes['avconv']
 
     def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
 
     def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
-        if not self._exes['ffmpeg'] and not self._exes['avconv']:
+        if not self._get_executable():
             raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
 
         files_cmd = []
         for path in input_paths:
             raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
 
         files_cmd = []
         for path in input_paths:
-            files_cmd.extend(['-i', encodeFilename(path)])
-        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y'] + files_cmd
+            files_cmd.extend(['-i', encodeFilename(path, True)])
+        cmd = ([self._get_executable(), '-y'] + files_cmd
                + opts +
                + opts +
-               [encodeFilename(self._ffmpeg_filename_argument(out_path))])
+               [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
 
         if self._downloader.params.get('verbose', False):
             self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
 
         if self._downloader.params.get('verbose', False):
             self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
@@ -107,6 +69,7 @@ class FFmpegPostProcessor(PostProcessor):
             return u'./' + fn
         return fn
 
             return u'./' + fn
         return fn
 
+
 class FFmpegExtractAudioPP(FFmpegPostProcessor):
     def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
         FFmpegPostProcessor.__init__(self, downloader)
 class FFmpegExtractAudioPP(FFmpegPostProcessor):
     def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
         FFmpegPostProcessor.__init__(self, downloader)
@@ -120,7 +83,10 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         if not self._exes['ffprobe'] and not self._exes['avprobe']:
             raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
         try:
         if not self._exes['ffprobe'] and not self._exes['avprobe']:
             raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
         try:
-            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
+            cmd = [
+                self._exes['avprobe'] or self._exes['ffprobe'],
+                '-show_streams',
+                encodeFilename(self._ffmpeg_filename_argument(path), True)]
             handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
             output = handle.communicate()[0]
             if handle.wait() != 0:
             handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
             output = handle.communicate()[0]
             if handle.wait() != 0:
@@ -136,8 +102,6 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         return None
 
     def run_ffmpeg(self, path, out_path, codec, more_opts):
         return None
 
     def run_ffmpeg(self, path, out_path, codec, more_opts):
-        if not self._exes['ffmpeg'] and not self._exes['avconv']:
-            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
         if codec is None:
             acodec_opts = []
         else:
         if codec is None:
             acodec_opts = []
         else:
@@ -155,13 +119,14 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         if filecodec is None:
             raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')
 
         if filecodec is None:
             raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')
 
+        uses_avconv = self._uses_avconv()
         more_opts = []
         if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
             if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                 # Lossless, but in another container
                 acodec = 'copy'
                 extension = 'm4a'
         more_opts = []
         if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
             if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                 # Lossless, but in another container
                 acodec = 'copy'
                 extension = 'm4a'
-                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
+                more_opts = ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc']
             elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                 # Lossless if possible
                 acodec = 'copy'
             elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                 # Lossless if possible
                 acodec = 'copy'
@@ -177,9 +142,9 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
                 more_opts = []
                 if self._preferredquality is not None:
                     if int(self._preferredquality) < 10:
                 more_opts = []
                 if self._preferredquality is not None:
                     if int(self._preferredquality) < 10:
-                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                        more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality]
                     else:
                     else:
-                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
+                        more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k']
         else:
             # We convert the audio (lossy)
             acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
         else:
             # We convert the audio (lossy)
             acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
@@ -188,13 +153,13 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             if self._preferredquality is not None:
                 # The opus codec doesn't support the -aq option
                 if int(self._preferredquality) < 10 and extension != 'opus':
             if self._preferredquality is not None:
                 # The opus codec doesn't support the -aq option
                 if int(self._preferredquality) < 10 and extension != 'opus':
-                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                    more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality]
                 else:
                 else:
-                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
+                    more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k']
             if self._preferredcodec == 'aac':
                 more_opts += ['-f', 'adts']
             if self._preferredcodec == 'm4a':
             if self._preferredcodec == 'aac':
                 more_opts += ['-f', 'adts']
             if self._preferredcodec == 'm4a':
-                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
+                more_opts += ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc']
             if self._preferredcodec == 'vorbis':
                 extension = 'ogg'
             if self._preferredcodec == 'wav':
             if self._preferredcodec == 'vorbis':
                 extension = 'ogg'
             if self._preferredcodec == 'wav':
@@ -212,14 +177,14 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                 self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
             else:
             if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                 self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
             else:
-                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
+                self._downloader.to_screen(u'[' + self._get_executable() + '] Destination: ' + new_path)
                 self.run_ffmpeg(path, new_path, acodec, more_opts)
         except:
             etype,e,tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
                 msg = u'audio conversion failed: ' + e.msg
             else:
                 self.run_ffmpeg(path, new_path, acodec, more_opts)
         except:
             etype,e,tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
                 msg = u'audio conversion failed: ' + e.msg
             else:
-                msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
+                msg = u'error running ' + self._get_executable()
             raise PostProcessingError(msg)
 
         # Try to update the date time for extracted audio file.
             raise PostProcessingError(msg)
 
         # Try to update the date time for extracted audio file.
@@ -232,6 +197,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         information['filepath'] = new_path
         return self._nopostoverwrites,information
 
         information['filepath'] = new_path
         return self._nopostoverwrites,information
 
+
 class FFmpegVideoConvertor(FFmpegPostProcessor):
     def __init__(self, downloader=None,preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
 class FFmpegVideoConvertor(FFmpegPostProcessor):
     def __init__(self, downloader=None,preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
@@ -496,16 +462,23 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
             return True, info
 
         filename = info['filepath']
             return True, info
 
         filename = info['filepath']
-        ext = os.path.splitext(filename)[1][1:]
-        temp_filename = filename + u'.temp'
+        temp_filename = prepend_extension(filename, 'temp')
 
         options = ['-c', 'copy']
         for (name, value) in metadata.items():
             options.extend(['-metadata', '%s=%s' % (name, value)])
 
         options = ['-c', 'copy']
         for (name, value) in metadata.items():
             options.extend(['-metadata', '%s=%s' % (name, value)])
-        options.extend(['-f', ext])
 
         self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
         return True, info
 
         self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
         self.run_ffmpeg(filename, temp_filename, options)
         os.remove(encodeFilename(filename))
         os.rename(encodeFilename(temp_filename), encodeFilename(filename))
         return True, info
+
+
+class FFmpegMergerPP(FFmpegPostProcessor):
+    def run(self, info):
+        filename = info['filepath']
+        args = ['-c', 'copy']
+        self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
+        return True, info
+
diff --git a/youtube_dl/postprocessor/xattrpp.py b/youtube_dl/postprocessor/xattrpp.py
new file mode 100644 (file)
index 0000000..1897924
--- /dev/null
@@ -0,0 +1,108 @@
+import os
+import subprocess
+import sys
+
+from .common import PostProcessor
+from ..utils import (
+    check_executable,
+    hyphenate_date,
+)
+
+
class XAttrMetadataPP(PostProcessor):
    """Write video metadata into the downloaded file's extended attributes.

    More info about extended attributes for media:
      http://freedesktop.org/wiki/CommonExtendedAttributes/
      http://www.freedesktop.org/wiki/PhreedomDraft/
      http://dublincore.org/documents/usageguide/elements.shtml

    TODO:
     * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated)
     * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution'
    """

    def run(self, info):
        """ Set extended attributes on downloaded file (if xattr support is found). """

        # Pick the best available xattr backend and expose it uniformly as
        # write_xattr(path, key, value).
        try:
            # Preferred: the pyxattr module.
            import xattr

            def write_xattr(path, key, value):
                return xattr.setxattr(path, key, value)

        except ImportError:
            if os.name == 'nt':
                # Emulate xattrs with NTFS Alternate Data Streams:
                # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
                def write_xattr(path, key, value):
                    assert ':' not in key
                    assert os.path.exists(path)

                    ads_fn = path + ":" + key
                    with open(ads_fn, "wb") as f:
                        f.write(value)
            else:
                user_has_setfattr = check_executable("setfattr", ['--version'])
                user_has_xattr = check_executable("xattr", ['-h'])

                if user_has_setfattr or user_has_xattr:

                    def write_xattr(path, key, value):
                        if user_has_setfattr:
                            cmd = ['setfattr', '-n', key, '-v', value, path]
                        elif user_has_xattr:
                            cmd = ['xattr', '-w', key, value, path]

                        subprocess.check_output(cmd)

                else:
                    # No backend at all: report and bail out.  (Previously the
                    # code fell through and crashed with a NameError on the
                    # undefined write_xattr below.)
                    if sys.platform.startswith('linux'):
                        self._downloader.report_error(
                            "Couldn't find a tool to set the xattrs. "
                            "Install either the python 'pyxattr' or 'xattr' "
                            "modules, or the GNU 'attr' package "
                            "(which contains the 'setfattr' tool).")
                    else:
                        self._downloader.report_error(
                            "Couldn't find a tool to set the xattrs. "
                            "Install either the python 'xattr' module, "
                            "or the 'xattr' binary.")
                    return False, info

        # Write the metadata to the file's xattrs
        self._downloader.to_screen('[metadata] Writing metadata to file\'s xattrs')

        filename = info['filepath']

        try:
            # Maps xattr names to the info_dict field they are filled from.
            xattr_mapping = {
                'user.xdg.referrer.url': 'webpage_url',
                # 'user.xdg.comment':            'description',
                'user.dublincore.title': 'title',
                'user.dublincore.date': 'upload_date',
                'user.dublincore.description': 'description',
                'user.dublincore.contributor': 'uploader',
                'user.dublincore.format': 'format',
            }

            for xattrname, infoname in xattr_mapping.items():

                value = info.get(infoname)

                if value:
                    if infoname == "upload_date":
                        # xattr convention wants hyphenated ISO dates.
                        value = hyphenate_date(value)

                    byte_value = value.encode('utf-8')
                    write_xattr(filename, xattrname, byte_value)

            return True, info

        except (subprocess.CalledProcessError, OSError):
            self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
            return False, info
+
index 2e48f187e665dad81caa663efdb9d0c33f088936..73fe1ad0a3a27165d3dffc61927733beb9c5ed33 100644 (file)
@@ -500,12 +500,13 @@ def unescapeHTML(s):
     result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
     return result
 
     result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
     return result
 
-def encodeFilename(s):
+
+def encodeFilename(s, for_subprocess=False):
     """
     @param s The name of the file
     """
 
     """
     @param s The name of the file
     """
 
-    assert type(s) == type(u'')
+    assert type(s) == compat_str
 
     # Python 3 has a Unicode API
     if sys.version_info >= (3, 0):
 
     # Python 3 has a Unicode API
     if sys.version_info >= (3, 0):
@@ -515,12 +516,18 @@ def encodeFilename(s):
         # Pass u'' directly to use Unicode APIs on Windows 2000 and up
         # (Detecting Windows NT 4 is tricky because 'major >= 4' would
         # match Windows 9x series as well. Besides, NT 4 is obsolete.)
         # Pass u'' directly to use Unicode APIs on Windows 2000 and up
         # (Detecting Windows NT 4 is tricky because 'major >= 4' would
         # match Windows 9x series as well. Besides, NT 4 is obsolete.)
-        return s
+        if not for_subprocess:
+            return s
+        else:
+            # For subprocess calls, encode with locale encoding
+            # Refer to http://stackoverflow.com/a/9951851/35070
+            encoding = preferredencoding()
     else:
         encoding = sys.getfilesystemencoding()
     else:
         encoding = sys.getfilesystemencoding()
-        if encoding is None:
-            encoding = 'utf-8'
-        return s.encode(encoding, 'ignore')
+    if encoding is None:
+        encoding = 'utf-8'
+    return s.encode(encoding, 'ignore')
+
 
 def decodeOption(optval):
     if optval is None:
 
 def decodeOption(optval):
     if optval is None:
@@ -539,7 +546,8 @@ def formatSeconds(secs):
     else:
         return '%d' % secs
 
     else:
         return '%d' % secs
 
-def make_HTTPS_handler(opts_no_check_certificate):
+
+def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
     if sys.version_info < (3, 2):
         import httplib
 
     if sys.version_info < (3, 2):
         import httplib
 
@@ -560,7 +568,7 @@ def make_HTTPS_handler(opts_no_check_certificate):
         class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler):
             def https_open(self, req):
                 return self.do_open(HTTPSConnectionV3, req)
         class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler):
             def https_open(self, req):
                 return self.do_open(HTTPSConnectionV3, req)
-        return HTTPSHandlerV3()
+        return HTTPSHandlerV3(**kwargs)
     else:
         context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
         context.verify_mode = (ssl.CERT_NONE
     else:
         context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
         context.verify_mode = (ssl.CERT_NONE
@@ -571,7 +579,7 @@ def make_HTTPS_handler(opts_no_check_certificate):
             context.load_default_certs()
         except AttributeError:
             pass  # Python < 3.4
             context.load_default_certs()
         except AttributeError:
             pass  # Python < 3.4
-        return compat_urllib_request.HTTPSHandler(context=context)
+        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
 
 class ExtractorError(Exception):
     """Error during info extraction."""
 
 class ExtractorError(Exception):
     """Error during info extraction."""
@@ -756,6 +764,7 @@ def unified_strdate(date_str):
         '%Y-%m-%d',
         '%d/%m/%Y',
         '%Y/%m/%d %H:%M:%S',
         '%Y-%m-%d',
         '%d/%m/%Y',
         '%Y/%m/%d %H:%M:%S',
+        '%Y-%m-%d %H:%M:%S',
         '%d.%m.%Y %H:%M',
         '%Y-%m-%dT%H:%M:%SZ',
         '%Y-%m-%dT%H:%M:%S.%fZ',
         '%d.%m.%Y %H:%M',
         '%Y-%m-%dT%H:%M:%SZ',
         '%Y-%m-%dT%H:%M:%S.%fZ',
@@ -809,6 +818,15 @@ def date_from_str(date_str):
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()
     
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()
     
def hyphenate_date(date_str):
    """Convert a 'YYYYMMDD' date string to 'YYYY-MM-DD'.

    Strings not in 'YYYYMMDD' form are returned unchanged.
    """
    m = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    return '-'.join(m.groups()) if m else date_str
+
 class DateRange(object):
     """Represents a time interval between two dates"""
     def __init__(self, start=None, end=None):
 class DateRange(object):
     """Represents a time interval between two dates"""
     def __init__(self, start=None, end=None):
@@ -849,12 +867,22 @@ def platform_name():
def write_string(s, out=None):
    """Write the unicode string *s* to *out* (default: sys.stderr) and flush,
    working around consoles that cannot encode it directly."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    data = s
    # Python 2 lies about the mode of sys.stderr, so also encode there.
    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):
        data = data.encode(preferredencoding(), 'ignore')

    try:
        out.write(data)
    except UnicodeEncodeError:
        # In Windows shells, this can fail even when the codec is just charmap!?
        # See https://wiki.python.org/moin/PrintFails#Issue
        if sys.platform == 'win32' and hasattr(out, 'encoding'):
            recoded = data.encode(out.encoding, 'ignore').decode(out.encoding)
            out.write(recoded)
        else:
            raise

    out.flush()
 
 
     out.flush()
 
 
@@ -1008,9 +1036,9 @@ def smuggle_url(url, data):
     return url + u'#' + sdata
 
 
     return url + u'#' + sdata
 
 
-def unsmuggle_url(smug_url):
+def unsmuggle_url(smug_url, default=None):
     if not '#__youtubedl_smuggle' in smug_url:
     if not '#__youtubedl_smuggle' in smug_url:
-        return smug_url, None
+        return smug_url, default
     url, _, sdata = smug_url.rpartition(u'#')
     jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
     data = json.loads(jsond)
     url, _, sdata = smug_url.rpartition(u'#')
     jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
     data = json.loads(jsond)
@@ -1070,7 +1098,7 @@ def fix_xml_all_ampersand(xml_str):
 
 
 def setproctitle(title):
 
 
 def setproctitle(title):
-    assert isinstance(title, type(u''))
+    assert isinstance(title, compat_str)
     try:
         libc = ctypes.cdll.LoadLibrary("libc.so.6")
     except OSError:
     try:
         libc = ctypes.cdll.LoadLibrary("libc.so.6")
     except OSError:
@@ -1098,3 +1126,38 @@ def url_basename(url):
class HEADRequest(compat_urllib_request.Request):
    """A Request subclass that issues an HTTP HEAD instead of GET."""

    def get_method(self):
        # urllib asks get_method() for the HTTP verb to send.
        return "HEAD"
+
+
def int_or_none(v):
    """Cast *v* to int, passing None through unchanged."""
    if v is None:
        return None
    return int(v)
+
+
def parse_duration(s):
    """Parse a '[[HH:]MM:]SS' duration string into a number of seconds.

    Returns None when *s* is None or does not match the expected format.
    """
    if s is None:
        return None

    m = re.match(
        r'(?:(?:(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)$', s)
    if m is None:
        return None

    total = int(m.group('secs'))
    mins = m.group('mins')
    if mins:
        total += 60 * int(mins)
        hours = m.group('hours')
        if hours:
            total += 3600 * int(hours)
    return total
+
+
def prepend_extension(filename, ext):
    """Insert *ext* in front of the file's real extension.

    e.g. prepend_extension('video.mp4', 'temp') -> 'video.temp.mp4'
    """
    root, real_ext = os.path.splitext(filename)
    return u'%s.%s%s' % (root, ext, real_ext)
+
+
def check_executable(exe, args=None):
    """Check whether the given binary is installed somewhere in PATH.

    *args* can be a list of arguments that produce a short output (like
    ['-version']) so the probe terminates quickly.  Returns the executable
    name on success, False when it cannot be launched.

    Note: uses None instead of a mutable [] default to avoid the shared
    default-argument pitfall.
    """
    try:
        # We only care whether the process can be spawned; discard its output.
        subprocess.Popen(
            [exe] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe
index 24855bceb5094ed3e31175dfd901073f2624bade..b9c25c4a964e493521c19cfbe05df283c55f5875 100644 (file)
@@ -1,2 +1,2 @@
 
 
-__version__ = '2013.12.23'
+__version__ = '2014.01.17.2'