include test/*.py
include test/*.json
include youtube-dl.bash-completion
+include youtube-dl.fish
include youtube-dl.1
recursive-include docs Makefile conf.py *.rst
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
clean:
- rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
+ rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part
cleanall: clean
rm -f youtube-dl youtube-dl.exe
-PREFIX=/usr/local
-BINDIR=$(PREFIX)/bin
-MANDIR=$(PREFIX)/man
-PYTHON=/usr/bin/env python
+PREFIX ?= /usr/local
+BINDIR ?= $(PREFIX)/bin
+MANDIR ?= $(PREFIX)/man
+SHAREDIR ?= $(PREFIX)/share
+PYTHON ?= /usr/bin/env python
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
ifeq ($(PREFIX),/usr)
endif
endif
-install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
install -d $(DESTDIR)$(BINDIR)
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
install -d $(DESTDIR)$(MANDIR)/man1
install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+ install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
+ install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
+ install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
+ install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
test:
#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
tar: youtube-dl.tar.gz
-.PHONY: all clean install test tar bash-completion pypi-files
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
-pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
+pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
bash-completion: youtube-dl.bash-completion
-youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
+ python devscripts/zsh-completion.py
+
+zsh-completion: youtube-dl.zsh
+
+youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
+ python devscripts/fish-completion.py
+
+fish-completion: youtube-dl.fish
+
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
--exclude '*.DS_Store' \
--exclude '*.kate-swp' \
-- \
bin devscripts test youtube_dl docs \
LICENSE README.md README.txt \
- Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
+ Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
+ youtube-dl.zsh youtube-dl.fish setup.py \
youtube-dl
Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
+OS X users can install **youtube-dl** with [Homebrew](http://brew.sh/).
+
+ brew install youtube-dl
+
+You can also use pip:
+
+ sudo pip install youtube-dl
+
Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
# DESCRIPTION
configuration in ~/.config/youtube-dl.conf
(%APPDATA%/youtube-dl/config.txt on
Windows)
+ --flat-playlist Do not extract the videos of a playlist,
+ only list them.
## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
downloaded videos in it.
--include-ads Download advertisements as well
(experimental)
- --youtube-include-dash-manifest Try to download the DASH manifest on
- YouTube videos (experimental)
## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second
downloads if possible.
--no-continue do not resume partially downloaded files
(restart from beginning)
- --no-part do not use .part files
+ --no-part do not use .part files - write directly
+ into output file
--no-mtime do not use the Last-modified header to set
the file modification time
--write-description write video description to a .description
-j, --dump-json simulate, quiet but print JSON information.
See --output for a description of available
keys.
+ -J, --dump-single-json simulate, quiet but print JSON information
+ for each command-line argument. If the URL
+ refers to a playlist, dump the whole
+ playlist information in a single line.
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
information about the video. (Currently
supported only for YouTube)
--user-agent UA specify a custom user agent
- --referer REF specify a custom referer, use if the video
+ --referer URL specify a custom referer, use if the video
access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value,
separated by a colon ':'. You can use this
## Video Format Options:
-f, --format FORMAT video format code, specify the order of
- preference using slashes: "-f 22/17/18".
- "-f mp4" and "-f flv" are also supported.
- You can also use the special names "best",
- "bestvideo", "bestaudio", "worst",
- "worstvideo" and "worstaudio". By default,
- youtube-dl will pick the best quality.
+ preference using slashes: -f 22/17/18 . -f
+ mp4 , -f m4a and -f flv are also
+ supported. You can also use the special
+ names "best", "bestvideo", "bestaudio",
+ "worst", "worstvideo" and "worstaudio". By
+ default, youtube-dl will pick the best
+ quality. Use commas to download multiple
+ audio formats, such as -f
+ 136/137/mp4/bestvideo,140/m4a/bestaudio
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats
+ --youtube-skip-dash-manifest Do not download the DASH manifest on
+ YouTube videos
## Subtitle Options:
--write-sub write subtitle file
language tags like 'en,pt'
## Authentication Options:
- -u, --username USERNAME account username
+ -u, --username USERNAME login with this account ID
-p, --password PASSWORD account password
+ -2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo, smotri)
(requires ffmpeg or avconv and ffprobe or
avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
- "opus", or "wav"; best by default
+ "opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
insert a value between 0 (better) and 9
(worse) for VBR or a specific bitrate like
postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
postprocessors
+ --exec CMD Execute a command on the file after
+ downloading, similar to find's -exec
+ syntax. Example: --exec 'adb push {}
+ /sdcard/Music/ && rm {}'
# CONFIGURATION
In some cases, you don't want special characters such as 中, spaces, or &, for example when transferring the downloaded filename to a Windows system or sending the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
- $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
- youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
- $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
- youtube-dl_test_video_.mp4 # A simple file name
+```bash
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+youtube-dl_test_video_.mp4 # A simple file name
+```
# VIDEO SELECTION
Examples:
- # Download only the videos uploaded in the last 6 months
- $ youtube-dl --dateafter now-6months
+```bash
+# Download only the videos uploaded in the last 6 months
+$ youtube-dl --dateafter now-6months
- # Download only the videos uploaded on January 1, 1970
- $ youtube-dl --date 19700101
+# Download only the videos uploaded on January 1, 1970
+$ youtube-dl --date 19700101
- $ # will only download the videos uploaded in the 200x decade
- $ youtube-dl --dateafter 20000101 --datebefore 20091231
+$ # will only download the videos uploaded in the 200x decade
+$ youtube-dl --dateafter 20000101 --datebefore 20091231
+```
# FAQ
+### How do I update youtube-dl?
+
+If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
+
+If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
+
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+
+As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
+
+ sudo apt-get remove -y youtube-dl
+
+Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
+
+```bash
+sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
+sudo chmod a+x /usr/local/bin/youtube-dl
+hash -r
+```
+
+Again, from then on you'll be able to update with `sudo youtube-dl -U`.
+
+### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
+
+YouTube changed their playlist format in March 2014 and again later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
+
+If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
+
+### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
+
+By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
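+
+For example, when downloading a playlist where a few videos may be broken, `-i` alone does the job (the playlist URL below is a placeholder):
+
+    youtube-dl -i "https://www.youtube.com/playlist?list=PLexample"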
+
### Can you please put the -b option back?
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
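
A sketch of that workflow, using the test video referenced elsewhere in this README (format code 22 is only an example and is not available for every video):

    youtube-dl -F BaW_jenozKc     # list the formats YouTube reports
    youtube-dl -f 22 BaW_jenozKc  # then request a specific one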
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
-
- # coding: utf-8
- from __future__ import unicode_literals
-
- import re
-
- from .common import InfoExtractor
-
-
- class YourExtractorIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
- _TEST = {
- 'url': 'http://yourextractor.com/watch/42',
- 'md5': 'TODO: md5 sum of the first 10KiB of the video file',
- 'info_dict': {
- 'id': '42',
- 'ext': 'mp4',
- 'title': 'Video title goes here',
- # TODO more properties, either as:
- # * A value
- # * MD5 checksum; start the string with md5:
- # * A regular expression; start the string with re:
- # * Any Python type (for example int or float)
- }
+ ```python
+ # coding: utf-8
+ from __future__ import unicode_literals
+
+ from .common import InfoExtractor
+
+
+ class YourExtractorIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://yourextractor.com/watch/42',
+ 'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+ 'info_dict': {
+ 'id': '42',
+ 'ext': 'mp4',
+ 'title': 'Video title goes here',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ # TODO more properties, either as:
+ # * A value
+ # * MD5 checksum; start the string with md5:
+ # * A regular expression; start the string with re:
+ # * Any Python type (for example int or float)
}
+ }
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- # TODO more code goes here, for example ...
- webpage = self._download_webpage(url, video_id)
- title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
-
- return {
- 'id': video_id,
- 'title': title,
- # TODO more properties (see youtube_dl/extractor/common.py)
- }
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ # TODO more code goes here, for example ...
+ webpage = self._download_webpage(url, video_id)
+ title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+ return {
+ 'id': video_id,
+ 'title': title,
+ # TODO more properties (see youtube_dl/extractor/common.py)
+ }
+ ```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
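   For the template above, the new import line would look like this (class and module names taken from the example):

   ```python
   from .yourextractor import YourExtractorIE
   ```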
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename `_TEST` to `_TESTS` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
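+   A minimal sketch of the multi-test shape (URLs and values are placeholders):
+
+   ```python
+   _TESTS = [{
+       'url': 'http://yourextractor.com/watch/42',
+       'md5': 'TODO: md5 sum of the first 10241 bytes',
+       'info_dict': {'id': '42', 'ext': 'mp4', 'title': 'Video title goes here'},
+   }, {
+       'url': 'http://yourextractor.com/watch/43',
+       'info_dict': {'id': '43', 'ext': 'mp4', 'title': 'Another video title'},
+   }]
+   ```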
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
In any case, thank you very much for your contributions!
+# EMBEDDING YOUTUBE-DL
+
+youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
+
+From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
+
+ import youtube_dl
+
+ ydl_opts = {}
+ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+ ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
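+
+Here is a minimal sketch of that, assuming only that the `logger` object exposes `debug`, `warning` and `error` methods (standard `logging` loggers do):
+
+    import youtube_dl
+
+
+    class MyLogger(object):
+        def debug(self, msg):
+            pass  # suppress verbose messages
+
+        def warning(self, msg):
+            pass  # suppress warnings
+
+        def error(self, msg):
+            print(msg)  # surface real errors
+
+
+    ydl_opts = {'logger': MyLogger()}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])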
+
# BUGS
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
Windows users can download a .exe file and place it in their home
directory or any other location on their PATH.
+OS X users can install youtube-dl with Homebrew.
+
+ brew install youtube-dl
+
+You can also use pip:
+
+ sudo pip install youtube-dl
+
Alternatively, refer to the developer instructions below for how to
check out and work with the git repository. For further options,
including PGP signatures, see
configuration in ~/.config/youtube-dl.conf
(%APPDATA%/youtube-dl/config.txt on
Windows)
+ --flat-playlist Do not extract the videos of a playlist,
+ only list them.
Video Selection:
----------------
downloaded videos in it.
--include-ads Download advertisements as well
(experimental)
- --youtube-include-dash-manifest Try to download the DASH manifest on
- YouTube videos (experimental)
Download Options:
-----------------
downloads if possible.
--no-continue do not resume partially downloaded files
(restart from beginning)
- --no-part do not use .part files
+ --no-part do not use .part files - write directly
+ into output file
--no-mtime do not use the Last-modified header to set
the file modification time
--write-description write video description to a .description
-j, --dump-json simulate, quiet but print JSON information.
See --output for a description of available
keys.
+ -J, --dump-single-json simulate, quiet but print JSON information
+ for each command-line argument. If the URL
+ refers to a playlist, dump the whole
+ playlist information in a single line.
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
information about the video. (Currently
supported only for YouTube)
--user-agent UA specify a custom user agent
- --referer REF specify a custom referer, use if the video
+ --referer URL specify a custom referer, use if the video
access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value,
separated by a colon ':'. You can use this
---------------------
-f, --format FORMAT video format code, specify the order of
- preference using slashes: "-f 22/17/18".
- "-f mp4" and "-f flv" are also supported.
- You can also use the special names "best",
- "bestvideo", "bestaudio", "worst",
- "worstvideo" and "worstaudio". By default,
- youtube-dl will pick the best quality.
+ preference using slashes: -f 22/17/18 . -f
+ mp4 , -f m4a and -f flv are also
+ supported. You can also use the special
+ names "best", "bestvideo", "bestaudio",
+ "worst", "worstvideo" and "worstaudio". By
+ default, youtube-dl will pick the best
+ quality. Use commas to download multiple
+ audio formats, such as -f
+ 136/137/mp4/bestvideo,140/m4a/bestaudio
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats
+ --youtube-skip-dash-manifest Do not download the DASH manifest on
+ YouTube videos
Subtitle Options:
-----------------
Authentication Options:
-----------------------
- -u, --username USERNAME account username
+ -u, --username USERNAME login with this account ID
-p, --password PASSWORD account password
+ -2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo, smotri)
(requires ffmpeg or avconv and ffprobe or
avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
- "opus", or "wav"; best by default
+ "opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
insert a value between 0 (better) and 9
(worse) for VBR or a specific bitrate like
postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
postprocessors
+ --exec CMD Execute a command on the file after
+ downloading, similar to find's -exec
+ syntax. Example: --exec 'adb push {}
+ /sdcard/Music/ && rm {}'
CONFIGURATION
=============
or sending the filename through an 8bit-unsafe channel. In these cases,
add the --restrict-filenames flag to get a shorter title:
- $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
- youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
- $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
- youtube-dl_test_video_.mp4 # A simple file name
+``` {.bash}
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
+$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+youtube-dl_test_video_.mp4 # A simple file name
+```
VIDEO SELECTION
===============
Examples:
- # Download only the videos uploaded in the last 6 months
- $ youtube-dl --dateafter now-6months
+``` {.bash}
+# Download only the videos uploaded in the last 6 months
+$ youtube-dl --dateafter now-6months
- # Download only the videos uploaded on January 1, 1970
- $ youtube-dl --date 19700101
+# Download only the videos uploaded on January 1, 1970
+$ youtube-dl --date 19700101
- $ # will only download the videos uploaded in the 200x decade
- $ youtube-dl --dateafter 20000101 --datebefore 20091231
+$ # will only download the videos uploaded in the 200x decade
+$ youtube-dl --dateafter 20000101 --datebefore 20091231
+```
FAQ
===
+How do I update youtube-dl?
+
+If you've followed our manual installation instructions, you can simply
+run youtube-dl -U (or, on Linux, sudo youtube-dl -U).
+
+If you have used pip, a simple sudo pip install -U youtube-dl is
+sufficient to update.
+
+If you have installed youtube-dl using a package manager like apt-get or
+yum, use the standard system update mechanism to update. Note that
+distribution packages are often outdated. As a rule of thumb, youtube-dl
+releases at least once a month, and often weekly or even daily. Simply
+go to http://yt-dl.org/ to find out the current version. Unfortunately,
+there is nothing we youtube-dl developers can do if your distribution
+serves a really outdated version. You can (and should) complain to your
+distribution in their bugtracker or support forum.
+
+As a last resort, you can also uninstall the version installed by your
+package manager and follow our manual installation instructions. For
+that, remove the distribution's package, with a line like
+
+ sudo apt-get remove -y youtube-dl
+
+Afterwards, simply follow our manual installation instructions:
+
+ sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
+ sudo chmod a+x /usr/local/bin/youtube-dl
+ hash -r
+
+Again, from then on you'll be able to update with sudo youtube-dl -U.
+
+I'm getting an error Unable to extract OpenGraph title on YouTube playlists
+
+YouTube changed their playlist format in March 2014 and again later on,
+so you'll need at least youtube-dl 2014.07.25 to download all YouTube
+videos.
+
+If you have installed youtube-dl with a package manager, pip, setup.py
+or a tarball, please use that to update. Note that Ubuntu packages do
+not seem to get updated anymore. Since we are not affiliated with
+Ubuntu, there is little we can do. Feel free to report bugs to the
+Ubuntu packaging guys - all they have to do is update the package to a
+somewhat recent version. See above for a way to update.
+
+Do I always have to pass in --max-quality FORMAT, or -citw?
+
+By default, youtube-dl intends to have the best options (incidentally,
+if you have a convincing case that these should be different, please
+file an issue where you explain that). Therefore, it is unnecessary and
+sometimes harmful to copy long option strings from webpages. In
+particular, --max-quality limits the video quality (so if you want the
+best quality, do NOT pass it in), and the only option out of -citw that
+is regularly useful is -i.
+
Can you please put the -b option back?
Most people asking this question are not aware that youtube-dl now
4. Start with this simple template and save it to
youtube_dl/extractor/yourextractor.py:
- # coding: utf-8
- from __future__ import unicode_literals
-
- import re
-
- from .common import InfoExtractor
-
-
- class YourExtractorIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
- _TEST = {
- 'url': 'http://yourextractor.com/watch/42',
- 'md5': 'TODO: md5 sum of the first 10KiB of the video file',
- 'info_dict': {
- 'id': '42',
- 'ext': 'mp4',
- 'title': 'Video title goes here',
- # TODO more properties, either as:
- # * A value
- # * MD5 checksum; start the string with md5:
- # * A regular expression; start the string with re:
- # * Any Python type (for example int or float)
- }
+ ``` {.python}
+ # coding: utf-8
+ from __future__ import unicode_literals
+
+ from .common import InfoExtractor
+
+
+ class YourExtractorIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://yourextractor.com/watch/42',
+ 'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+ 'info_dict': {
+ 'id': '42',
+ 'ext': 'mp4',
+ 'title': 'Video title goes here',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ # TODO more properties, either as:
+ # * A value
+ # * MD5 checksum; start the string with md5:
+ # * A regular expression; start the string with re:
+ # * Any Python type (for example int or float)
}
+ }
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
- # TODO more code goes here, for example ...
- webpage = self._download_webpage(url, video_id)
- title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+ # TODO more code goes here, for example ...
+ webpage = self._download_webpage(url, video_id)
+ title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
- return {
- 'id': video_id,
- 'title': title,
- # TODO more properties (see youtube_dl/extractor/common.py)
- }
+ return {
+ 'id': video_id,
+ 'title': title,
+ # TODO more properties (see youtube_dl/extractor/common.py)
+ }
+ ```
5. Add an import in youtube_dl/extractor/__init__.py.
6. Run python test/test_download.py TestDownload.test_YourExtractor.
This should fail at first, but you can continually re-run it until
- you're done.
+ you're done. If you decide to add more than one test, then rename
+ _TEST to _TESTS and make it into a list of dictionaries. The tests
+   will then be named TestDownload.test_YourExtractor,
+ TestDownload.test_YourExtractor_1,
+ TestDownload.test_YourExtractor_2, etc.
7. Have a look at youtube_dl/extractor/common.py for possible
helper methods and a detailed description of what your extractor
should return. Add tests and code for as many as you want.
In any case, thank you very much for your contributions!
+EMBEDDING YOUTUBE-DL
+====================
+
+youtube-dl makes the best effort to be a good command-line program, and
+thus should be callable from any programming language. If you encounter
+any problems parsing its output, feel free to create a report.
+
+From a Python program, you can embed youtube-dl in a more powerful
+fashion, like this:
+
+ import youtube_dl
+
+ ydl_opts = {}
+ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+ ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can
+be done, have a look at youtube_dl/YoutubeDL.py. For a start, if you
+want to intercept youtube-dl's output, set a logger object.
+
BUGS
====
--- /dev/null
+
+{{commands}}
+
+
+complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
--- /dev/null
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import optparse
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn(os.path.abspath(__file__))))  # make youtube_dl importable from the repo checkout
+import youtube_dl
+from youtube_dl.utils import shell_quote
+
+FISH_COMPLETION_FILE = 'youtube-dl.fish'
+FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
+
+EXTRA_ARGS = {
+ 'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],
+
+ # Options that need a file parameter
+ 'download-archive': ['--require-parameter'],
+ 'cookies': ['--require-parameter'],
+ 'load-info': ['--require-parameter'],
+ 'batch-file': ['--require-parameter'],
+}
+
+def build_completion(opt_parser):
+ commands = []
+
+ for group in opt_parser.option_groups:
+ for option in group.option_list:
+ long_option = option.get_opt_string().strip('-')
+ complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
+ if option._short_opts:
+ complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
+ if option.help != optparse.SUPPRESS_HELP:
+ complete_cmd += ['--description', option.help]
+ complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
+ commands.append(shell_quote(complete_cmd))
+
+ with open(FISH_COMPLETION_TEMPLATE) as f:
+ template = f.read()
+ filled_template = template.replace('{{commands}}', '\n'.join(commands))
+ with open(FISH_COMPLETION_FILE, 'w') as f:
+ f.write(filled_template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
-git checkout HEAD -- youtube-dl youtube-dl.exe
/bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done
--- /dev/null
+#compdef youtube-dl
+
+__youtube_dl() {
+ local curcontext="$curcontext" fileopts diropts cur prev
+ typeset -A opt_args
+ fileopts="{{fileopts}}"
+ diropts="{{diropts}}"
+ cur=$words[CURRENT]
+ case $cur in
+ :)
+ _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
+ ;;
+ *)
+ prev=$words[CURRENT-1]
+ if [[ ${prev} =~ ${fileopts} ]]; then
+ _path_files
+ elif [[ ${prev} =~ ${diropts} ]]; then
+ _path_files -/
+ elif [[ ${prev} == "--recode-video" ]]; then
+ _arguments '*: :(mp4 flv ogg webm mkv)'
+ else
+ _arguments '*: :({{flags}})'
+ fi
+ ;;
+ esac
+}
+
+__youtube_dl
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn(os.path.abspath(__file__))))  # make youtube_dl importable from the repo checkout
+import youtube_dl
+
+ZSH_COMPLETION_FILE = "youtube-dl.zsh"
+ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+
+
+def build_completion(opt_parser):
+ opts = [opt for group in opt_parser.option_groups
+ for opt in group.option_list]
+ opts_file = [opt for opt in opts if opt.metavar == "FILE"]
+ opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+
+ fileopts = []
+ for opt in opts_file:
+ if opt._short_opts:
+ fileopts.extend(opt._short_opts)
+ if opt._long_opts:
+ fileopts.extend(opt._long_opts)
+
+ diropts = []
+ for opt in opts_dir:
+ if opt._short_opts:
+ diropts.extend(opt._short_opts)
+ if opt._long_opts:
+ diropts.extend(opt._long_opts)
+
+ flags = [opt.get_opt_string() for opt in opts]
+
+ with open(ZSH_COMPLETION_TEMPLATE) as f:
+ template = f.read()
+
+ template = template.replace("{{fileopts}}", "|".join(fileopts))
+ template = template.replace("{{diropts}}", "|".join(diropts))
+ template = template.replace("{{flags}}", " ".join(flags))
+
+ with open(ZSH_COMPLETION_FILE, "w") as f:
+ f.write(template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
# built documents.
#
# The short X.Y version.
-import youtube_dl
-version = youtube_dl.__version__
+from youtube_dl.version import __version__
+version = __version__
# The full version, including alpha/beta/rc tags.
release = version
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
+ ('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
+from __future__ import unicode_literals
+
import errno
import io
import hashlib
from youtube_dl.utils import (
compat_str,
preferredencoding,
+ write_string,
)
If stderr is a tty file, the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and os.name != 'nt':
- _msg_header = u'\033[0;33mWARNING:\033[0m'
+ _msg_header = '\033[0;33mWARNING:\033[0m'
else:
- _msg_header = u'WARNING:'
- output = u'%s %s\n' % (_msg_header, message)
+ _msg_header = 'WARNING:'
+ output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
match_rex = re.compile(match_str)
self.assertTrue(
- isinstance(got, compat_str) and match_rex.match(got),
- u'field %s (value: %r) should match %r' % (info_field, got, match_str))
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, info_field))
+ self.assertTrue(
+ match_rex.match(got),
+ 'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
- u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+ 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
- u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+ 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
- for key in ('id', 'url', 'title', 'ext'):
- self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
+ if got_dict.get('_type') != 'playlist':
+ for key in ('id', 'url', 'title', 'ext'):
+ self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
- self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
+ self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict(
    (key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
    for key, value in got_dict.items()
    if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
- sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
+ def _repr(v):
+ if isinstance(v, compat_str):
+ return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
+ else:
+ return repr(v)
+ info_dict_str = ''.join(
+ ' %s: %s,\n' % (_repr(k), _repr(v))
+ for k, v in test_info_dict.items())
+ write_string('\n"info_dict": {\n' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
+
+
+def expect_warnings(ydl, warnings_re):
+    """Silence warnings matching any regexp in warnings_re;
+    everything else goes through the original handler."""
+    real_warning = ydl.report_warning
+
+    def _report_warning(w):
+        if not any(re.search(w_re, w) for w_re in warnings_re):
+            real_warning(w)
+
+    ydl.report_warning = _report_warning
"rejecttitle": null,
"retries": 10,
"simulate": false,
- "skip_download": false,
"subtitleslang": null,
"subtitlesformat": "srt",
"test": true,
'138', '137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
- '141', '172', '140', '139', '171',
+ '141', '172', '140', '171', '139',
]
for f1id, f2id in zip(order, order[1:]):
from youtube_dl.extractor import (
FacebookIE,
gen_extractors,
- JustinTVIE,
+ TwitchIE,
YoutubeIE,
)
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
- def test_justin_tv_channelid_matching(self):
- self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
- self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
- self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
+ def test_twitch_channelid_matching(self):
+ self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
+ self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
+ self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
+ self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))
- def test_justintv_videoid_matching(self):
- self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
+ def test_twitch_videoid_matching(self):
+ self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
- def test_justin_tv_chapterid_matching(self):
- self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
+ def test_twitch_chapterid_matching(self):
+ self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
def test_youtube_extract(self):
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
def test_facebook_matching(self):
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
+ self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))
def test_no_duplicates(self):
ies = gen_extractors()
if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
else:
- self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))
+ self.assertFalse(
+ ie.suitable(url),
+ '%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))
def test_keywords(self):
self.assertMatch(':ytsubs', ['youtube:subscriptions'])
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
- def test_ComedyCentralShows(self):
- self.assertMatch(
- 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
- ['ComedyCentralShows'])
- self.assertMatch(
- 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
- ['ComedyCentralShows'])
-
def test_yahoo_https(self):
# https://github.com/rg3/youtube-dl/issues/2701
self.assertMatch(
--- /dev/null
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import shutil
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from test.helper import FakeYDL
+from youtube_dl.cache import Cache
+
+
+def _is_empty(d):
+ return not bool(os.listdir(d))
+
+
+def _mkdir(d):
+ if not os.path.exists(d):
+ os.mkdir(d)
+
+
+class TestCache(unittest.TestCase):
+ def setUp(self):
+ TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+ TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
+ _mkdir(TESTDATA_DIR)
+ self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
+        self.tearDown()  # start from a clean state, removing leftovers from previous runs
+
+ def tearDown(self):
+ if os.path.exists(self.test_dir):
+ shutil.rmtree(self.test_dir)
+
+ def test_cache(self):
+ ydl = FakeYDL({
+ 'cachedir': self.test_dir,
+ })
+ c = Cache(ydl)
+ obj = {'x': 1, 'y': ['ä', '\\a', True]}
+ self.assertEqual(c.load('test_cache', 'k.'), None)
+ c.store('test_cache', 'k.', obj)
+ self.assertEqual(c.load('test_cache', 'k2'), None)
+ self.assertFalse(_is_empty(self.test_dir))
+ self.assertEqual(c.load('test_cache', 'k.'), obj)
+ self.assertEqual(c.load('test_cache', 'y'), None)
+ self.assertEqual(c.load('test_cache2', 'k.'), None)
+ c.remove()
+ self.assertFalse(os.path.exists(self.test_dir))
+ self.assertEqual(c.load('test_cache', 'k.'), None)
+
+
+if __name__ == '__main__':
+ unittest.main()
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
+ assertGreaterEqual,
+ expect_warnings,
get_params,
gettestcases,
expect_info_dict,
compat_HTTPError,
DownloadError,
ExtractorError,
+ format_bytes,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
def test_template(self):
ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
+ is_playlist = any(k.startswith('playlist') for k in test_case)
+ test_cases = test_case.get(
+ 'playlist', [] if is_playlist else [test_case])
+
def print_skipping(reason):
print('Skipping %s: %s' % (test_case['name'], reason))
if not ie.working():
print_skipping('IE marked as not _WORKING')
return
- if 'playlist' not in test_case:
- info_dict = test_case.get('info_dict', {})
- if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
+
+ for tc in test_cases:
+ info_dict = tc.get('info_dict', {})
+ if not tc.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+
if 'skip' in test_case:
print_skipping(test_case['skip'])
return
return
params = get_params(test_case.get('params', {}))
+ if is_playlist and 'playlist' not in test_case:
+ params.setdefault('extract_flat', True)
+ params.setdefault('skip_download', True)
ydl = YoutubeDL(params)
ydl.add_default_info_extractors()
if status['status'] == 'finished':
finished_hook_called.add(status['filename'])
ydl.add_progress_hook(_hook)
+ expect_warnings(ydl, test_case.get('expected_warnings', []))
def get_tc_filename(tc):
return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
- test_cases = test_case.get('playlist', [test_case])
- def try_rm_tcs_files():
- for tc in test_cases:
+ res_dict = None
+ def try_rm_tcs_files(tcs=None):
+ if tcs is None:
+ tcs = test_cases
+ for tc in tcs:
tc_filename = get_tc_filename(tc)
try_rm(tc_filename)
try_rm(tc_filename + '.part')
try_num = 1
while True:
try:
- ydl.download([test_case['url']])
+                # We're not using .download here since that is just a shim
+ # for outside error handling, and returns the exit code
+ # instead of the result dict.
+ res_dict = ydl.extract_info(test_case['url'])
except (DownloadError, ExtractorError) as err:
# Check if the exception is not a network related one
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
else:
break
+ if is_playlist:
+ self.assertEqual(res_dict['_type'], 'playlist')
+ self.assertTrue('entries' in res_dict)
+ expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
+
+ if 'playlist_mincount' in test_case:
+ assertGreaterEqual(
+ self,
+ len(res_dict['entries']),
+ test_case['playlist_mincount'],
+ 'Expected at least %d in playlist %s, but got only %d' % (
+ test_case['playlist_mincount'], test_case['url'],
+ len(res_dict['entries'])))
+ if 'playlist_count' in test_case:
+ self.assertEqual(
+ len(res_dict['entries']),
+ test_case['playlist_count'],
+ 'Expected %d entries in playlist %s, but got %d.' % (
+ test_case['playlist_count'],
+ test_case['url'],
+ len(res_dict['entries']),
+ ))
+ if 'playlist_duration_sum' in test_case:
+ got_duration = sum(e['duration'] for e in res_dict['entries'])
+ self.assertEqual(
+ test_case['playlist_duration_sum'], got_duration)
+
for tc in test_cases:
tc_filename = get_tc_filename(tc)
if not test_case.get('params', {}).get('skip_download', False):
self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
self.assertTrue(tc_filename in finished_hook_called)
+ expected_minsize = tc.get('file_minsize', 10000)
+ if expected_minsize is not None:
+ if params.get('test'):
+ expected_minsize = max(expected_minsize, 10000)
+ got_fsize = os.path.getsize(tc_filename)
+ assertGreaterEqual(
+ self, got_fsize, expected_minsize,
+ 'Expected %s to be at least %s, but it\'s only %s ' %
+ (tc_filename, format_bytes(expected_minsize),
+ format_bytes(got_fsize)))
+ if 'md5' in tc:
+ md5_for_file = _file_md5(tc_filename)
+ self.assertEqual(md5_for_file, tc['md5'])
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
- self.assertTrue(os.path.exists(info_json_fn))
- if 'md5' in tc:
- md5_for_file = _file_md5(tc_filename)
- self.assertEqual(md5_for_file, tc['md5'])
+ self.assertTrue(
+ os.path.exists(info_json_fn),
+ 'Missing info file %s' % info_json_fn)
with io.open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
finally:
try_rm_tcs_files()
+ if is_playlist and res_dict is not None and res_dict.get('entries'):
+ # Remove all other files that may have been extracted if the
+ # extractor returns full results even with extract_flat
+ res_tcs = [{'info_dict': e} for e in res_dict['entries']]
+ try_rm_tcs_files(res_tcs)
return test_template
+++ /dev/null
-#!/usr/bin/env python
-# encoding: utf-8
-
-from __future__ import unicode_literals
-
-# Allow direct execution
-import os
-import sys
-import unittest
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from test.helper import (
- assertRegexpMatches,
- assertGreaterEqual,
- expect_info_dict,
- FakeYDL,
-)
-
-from youtube_dl.extractor import (
- AcademicEarthCourseIE,
- DailymotionPlaylistIE,
- DailymotionUserIE,
- VimeoChannelIE,
- VimeoUserIE,
- VimeoAlbumIE,
- VimeoGroupsIE,
- VineUserIE,
- UstreamChannelIE,
- SoundcloudSetIE,
- SoundcloudUserIE,
- SoundcloudPlaylistIE,
- TeacherTubeUserIE,
- LivestreamIE,
- LivestreamOriginalIE,
- NHLVideocenterIE,
- BambuserChannelIE,
- BandcampAlbumIE,
- SmotriCommunityIE,
- SmotriUserIE,
- IviCompilationIE,
- ImdbListIE,
- KhanAcademyIE,
- EveryonesMixtapeIE,
- RutubeChannelIE,
- RutubePersonIE,
- GoogleSearchIE,
- GenericIE,
- TEDIE,
- ToypicsUserIE,
- XTubeUserIE,
- InstagramUserIE,
- CSpanIE,
- AolIE,
-)
-
-
-class TestPlaylists(unittest.TestCase):
- def assertIsPlaylist(self, info):
- """Make sure the info has '_type' set to 'playlist'"""
- self.assertEqual(info['_type'], 'playlist')
-
- def test_dailymotion_playlist(self):
- dl = FakeYDL()
- ie = DailymotionPlaylistIE(dl)
- result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'SPORT')
- self.assertTrue(len(result['entries']) > 20)
-
- def test_dailymotion_user(self):
- dl = FakeYDL()
- ie = DailymotionUserIE(dl)
- result = ie.extract('https://www.dailymotion.com/user/nqtv')
- self.assertIsPlaylist(result)
- assertGreaterEqual(self, len(result['entries']), 100)
- self.assertEqual(result['title'], 'Rémi Gaillard')
-
- def test_vimeo_channel(self):
- dl = FakeYDL()
- ie = VimeoChannelIE(dl)
- result = ie.extract('http://vimeo.com/channels/tributes')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'Vimeo Tributes')
- self.assertTrue(len(result['entries']) > 24)
-
- def test_vimeo_user(self):
- dl = FakeYDL()
- ie = VimeoUserIE(dl)
- result = ie.extract('http://vimeo.com/nkistudio/videos')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'Nki')
- self.assertTrue(len(result['entries']) > 65)
-
- def test_vimeo_album(self):
- dl = FakeYDL()
- ie = VimeoAlbumIE(dl)
- result = ie.extract('http://vimeo.com/album/2632481')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'Staff Favorites: November 2013')
- self.assertTrue(len(result['entries']) > 12)
-
- def test_vimeo_groups(self):
- dl = FakeYDL()
- ie = VimeoGroupsIE(dl)
- result = ie.extract('http://vimeo.com/groups/rolexawards')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
- self.assertTrue(len(result['entries']) > 72)
-
- def test_vine_user(self):
- dl = FakeYDL()
- ie = VineUserIE(dl)
- result = ie.extract('https://vine.co/Visa')
- self.assertIsPlaylist(result)
- assertGreaterEqual(self, len(result['entries']), 47)
-
- def test_ustream_channel(self):
- dl = FakeYDL()
- ie = UstreamChannelIE(dl)
- result = ie.extract('http://www.ustream.tv/channel/channeljapan')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '10874166')
- assertGreaterEqual(self, len(result['entries']), 54)
-
- def test_soundcloud_set(self):
- dl = FakeYDL()
- ie = SoundcloudSetIE(dl)
- result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'The Royal Concept EP')
- assertGreaterEqual(self, len(result['entries']), 6)
-
- def test_soundcloud_user(self):
- dl = FakeYDL()
- ie = SoundcloudUserIE(dl)
- result = ie.extract('https://soundcloud.com/the-concept-band')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '9615865')
- assertGreaterEqual(self, len(result['entries']), 12)
-
- def test_soundcloud_likes(self):
- dl = FakeYDL()
- ie = SoundcloudUserIE(dl)
- result = ie.extract('https://soundcloud.com/the-concept-band/likes')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '9615865')
- assertGreaterEqual(self, len(result['entries']), 1)
-
- def test_soundcloud_playlist(self):
- dl = FakeYDL()
- ie = SoundcloudPlaylistIE(dl)
- result = ie.extract('http://api.soundcloud.com/playlists/4110309')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '4110309')
- self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
- assertRegexpMatches(
- self, result['description'], r'.*?TILT Brass - Bowery Poetry Club')
- self.assertEqual(len(result['entries']), 6)
-
- def test_livestream_event(self):
- dl = FakeYDL()
- ie = LivestreamIE(dl)
- result = ie.extract('http://new.livestream.com/tedx/cityenglish')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'TEDCity2.0 (English)')
- assertGreaterEqual(self, len(result['entries']), 4)
-
- def test_livestreamoriginal_folder(self):
- dl = FakeYDL()
- ie = LivestreamOriginalIE(dl)
- result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
- assertGreaterEqual(self, len(result['entries']), 28)
-
- def test_nhl_videocenter(self):
- dl = FakeYDL()
- ie = NHLVideocenterIE(dl)
- result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '999')
- self.assertEqual(result['title'], 'Highlights')
- self.assertEqual(len(result['entries']), 12)
-
- def test_bambuser_channel(self):
- dl = FakeYDL()
- ie = BambuserChannelIE(dl)
- result = ie.extract('http://bambuser.com/channel/pixelversity')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'pixelversity')
- assertGreaterEqual(self, len(result['entries']), 60)
-
- def test_bandcamp_album(self):
- dl = FakeYDL()
- ie = BandcampAlbumIE(dl)
- result = ie.extract('http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'Hierophany of the Open Grave')
- assertGreaterEqual(self, len(result['entries']), 9)
-
- def test_smotri_community(self):
- dl = FakeYDL()
- ie = SmotriCommunityIE(dl)
- result = ie.extract('http://smotri.com/community/video/kommuna')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'kommuna')
- self.assertEqual(result['title'], 'КПРФ')
- assertGreaterEqual(self, len(result['entries']), 4)
-
- def test_smotri_user(self):
- dl = FakeYDL()
- ie = SmotriUserIE(dl)
- result = ie.extract('http://smotri.com/user/inspector')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'inspector')
- self.assertEqual(result['title'], 'Inspector')
- assertGreaterEqual(self, len(result['entries']), 9)
-
- def test_AcademicEarthCourse(self):
- dl = FakeYDL()
- ie = AcademicEarthCourseIE(dl)
- result = ie.extract('http://academicearth.org/playlists/laws-of-nature/')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'laws-of-nature')
- self.assertEqual(result['title'], 'Laws of Nature')
- self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')# u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
- self.assertEqual(len(result['entries']), 4)
-
- def test_ivi_compilation(self):
- dl = FakeYDL()
- ie = IviCompilationIE(dl)
- result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'dvoe_iz_lartsa')
- self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
- assertGreaterEqual(self, len(result['entries']), 24)
-
- def test_ivi_compilation_season(self):
- dl = FakeYDL()
- ie = IviCompilationIE(dl)
- result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa/season1')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
- self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
- assertGreaterEqual(self, len(result['entries']), 12)
-
- def test_imdb_list(self):
- dl = FakeYDL()
- ie = ImdbListIE(dl)
- result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'JFs9NWw6XI0')
- self.assertEqual(result['title'], 'March 23, 2012 Releases')
- self.assertEqual(len(result['entries']), 7)
-
- def test_khanacademy_topic(self):
- dl = FakeYDL()
- ie = KhanAcademyIE(dl)
- result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'cryptography')
- self.assertEqual(result['title'], 'Journey into cryptography')
- self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
- assertGreaterEqual(self, len(result['entries']), 3)
-
- def test_EveryonesMixtape(self):
- dl = FakeYDL()
- ie = EveryonesMixtapeIE(dl)
- result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'm7m0jJAbMQi')
- self.assertEqual(result['title'], 'Driving')
- self.assertEqual(len(result['entries']), 24)
-
- def test_rutube_channel(self):
- dl = FakeYDL()
- ie = RutubeChannelIE(dl)
- result = ie.extract('http://rutube.ru/tags/video/1800/')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '1800')
- assertGreaterEqual(self, len(result['entries']), 68)
-
- def test_rutube_person(self):
- dl = FakeYDL()
- ie = RutubePersonIE(dl)
- result = ie.extract('http://rutube.ru/video/person/313878/')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '313878')
- assertGreaterEqual(self, len(result['entries']), 37)
-
- def test_multiple_brightcove_videos(self):
- # https://github.com/rg3/youtube-dl/issues/2283
- dl = FakeYDL()
- ie = GenericIE(dl)
- result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'always-never-nuclear-command-and-control')
- self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
- self.assertEqual(len(result['entries']), 3)
-
- def test_GoogleSearch(self):
- dl = FakeYDL()
- ie = GoogleSearchIE(dl)
- result = ie.extract('gvsearch15:python language')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'python language')
- self.assertEqual(result['title'], 'python language')
- self.assertEqual(len(result['entries']), 15)
-
- def test_generic_rss_feed(self):
- dl = FakeYDL()
- ie = GenericIE(dl)
- result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml')
- self.assertEqual(result['title'], 'Zero Punctuation')
- self.assertTrue(len(result['entries']) > 10)
-
- def test_ted_playlist(self):
- dl = FakeYDL()
- ie = TEDIE(dl)
- result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '10')
- self.assertEqual(result['title'], 'Who are the hackers?')
- assertGreaterEqual(self, len(result['entries']), 6)
-
- def test_toypics_user(self):
- dl = FakeYDL()
- ie = ToypicsUserIE(dl)
- result = ie.extract('http://videos.toypics.net/Mikey')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'Mikey')
- assertGreaterEqual(self, len(result['entries']), 17)
-
- def test_xtube_user(self):
- dl = FakeYDL()
- ie = XTubeUserIE(dl)
- result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'greenshowers')
- assertGreaterEqual(self, len(result['entries']), 155)
-
- def test_InstagramUser(self):
- dl = FakeYDL()
- ie = InstagramUserIE(dl)
- result = ie.extract('http://instagram.com/porsche')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'porsche')
- assertGreaterEqual(self, len(result['entries']), 2)
- test_video = next(
- e for e in result['entries']
- if e['id'] == '614605558512799803_462752227')
- dl.add_default_extra_info(test_video, ie, '(irrelevant URL)')
- dl.process_video_result(test_video, download=False)
- EXPECTED = {
- 'id': '614605558512799803_462752227',
- 'ext': 'mp4',
- 'title': '#Porsche Intelligent Performance.',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'uploader': 'Porsche',
- 'uploader_id': 'porsche',
- 'timestamp': 1387486713,
- 'upload_date': '20131219',
- }
- expect_info_dict(self, EXPECTED, test_video)
-
- def test_CSpan_playlist(self):
- dl = FakeYDL()
- ie = CSpanIE(dl)
- result = ie.extract(
- 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '342759')
- self.assertEqual(
- result['title'], 'General Motors Ignition Switch Recall')
- whole_duration = sum(e['duration'] for e in result['entries'])
- self.assertEqual(whole_duration, 14855)
-
- def test_aol_playlist(self):
- dl = FakeYDL()
- ie = AolIE(dl)
- result = ie.extract(
- 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '152147')
- self.assertEqual(
- result['title'], 'Brace Yourself - Today\'s Weirdest News')
- assertGreaterEqual(self, len(result['entries']), 10)
-
- def test_TeacherTubeUser(self):
- dl = FakeYDL()
- ie = TeacherTubeUserIE(dl)
- result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
- self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'rbhagwati2')
- assertGreaterEqual(self, len(result['entries']), 179)
-
-if __name__ == '__main__':
- unittest.main()
DailymotionIE,
TEDIE,
VimeoIE,
+ WallaIE,
)
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+class TestWallaSubtitles(BaseTestSubtitles):
+ url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
+ IE = WallaIE
+
+ def test_list_subtitles(self):
+ self.DL.expect_warning(u'Automatic Captions not supported by this server')
+ self.DL.params['listsubtitles'] = True
+ info_dict = self.getInfoDict()
+ self.assertEqual(info_dict, None)
+
+ def test_allsubtitles(self):
+ self.DL.expect_warning(u'Automatic Captions not supported by this server')
+ self.DL.params['writesubtitles'] = True
+ self.DL.params['allsubtitles'] = True
+ subtitles = self.getSubtitles()
+ self.assertEqual(set(subtitles.keys()), set(['heb']))
+ self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
+
+ def test_nosubtitles(self):
+ self.DL.expect_warning(u'video doesn\'t have subtitles')
+ self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
+ self.DL.params['writesubtitles'] = True
+ self.DL.params['allsubtitles'] = True
+ subtitles = self.getSubtitles()
+ self.assertEqual(len(subtitles), 0)
+
+
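The md5 helper used in these subtitle assertions compares a checksum of the downloaded subtitle text; presumably it is the small wrapper from test/helper.py, along these lines:

    import hashlib

    def md5(s):
        # hex digest of the UTF-8 encoded subtitle text
        return hashlib.md5(s.encode('utf-8')).hexdigest()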
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# coding: utf-8
+from __future__ import unicode_literals
+
# Allow direct execution
import os
import sys
import json
import xml.etree.ElementTree
-#from youtube_dl.utils import htmlentity_transform
from youtube_dl.utils import (
DateRange,
encodeFilename,
fix_xml_ampersands,
get_meta_content,
orderedSet,
- PagedList,
+ OnDemandPagedList,
+ InAdvancePagedList,
parse_duration,
read_batch_urls,
sanitize_filename,
parse_iso8601,
strip_jsonp,
uppercase_escape,
+ limit_length,
+ escape_rfc3986,
+ escape_url,
+ js_to_json,
+ get_filesystem_encoding,
+ compat_getenv,
+ compat_expanduser,
)
-if sys.version_info < (3, 0):
- _compat_str = lambda b: b.decode('unicode-escape')
-else:
- _compat_str = lambda s: s
-
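The _compat_str shim removed here became unnecessary once the file opted into unicode_literals (added near the top of this diff): plain string literals are now text on Python 2 and Python 3 alike. A quick illustration of the effect:

    from __future__ import unicode_literals

    s = 'ä'
    # one code point, not two UTF-8 bytes, on both Python 2 and Python 3
    assert s == '\xe4' and len(s) == 1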
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
- aumlaut = _compat_str('\xe4')
+ aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
- tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
+ tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
forbidden = '"\0\\/'
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
- tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
+ tests = 'a\xe4b\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
- self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename
+ self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
- self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
- self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
+ self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
+ self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
- self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
+ self.assertEqual(unescapeHTML('%20;'), '%20;')
+ self.assertEqual(
+ unescapeHTML('é'), 'é')
def test_daterange(self):
_20century = DateRange("19000101","20000101")
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
+ self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
def test_find_xpath_attr(self):
- testxml = u'''<root>
+ testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
def test_meta_parser(self):
- testhtml = u'''
+ testhtml = '''
<head>
<meta name="description" content="foo & bar">
<meta content='Plato' name='author'/>
</head>
'''
get_meta = lambda name: get_meta_content(name, testhtml)
- self.assertEqual(get_meta('description'), u'foo & bar')
+ self.assertEqual(get_meta('description'), 'foo & bar')
self.assertEqual(get_meta('author'), 'Plato')
def test_xpath_with_ns(self):
- testxml = u'''<root xmlns:media="http://example.com/">
+ testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
doc = xml.etree.ElementTree.fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
- self.assertEqual(find('media:song/media:author').text, u'The Author')
- self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
+ self.assertEqual(find('media:song/media:author').text, 'The Author')
+ self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_smuggle_url(self):
data = {u"ö": u"ö", u"abc": [3]}
self.assertEqual(res_data, None)
def test_shell_quote(self):
- args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
- self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
+ args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
+ self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
def test_url_basename(self):
- self.assertEqual(url_basename(u'http://foo.de/'), u'')
- self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
- self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
- self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
- self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
+ self.assertEqual(url_basename('http://foo.de/'), '')
+ self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
+ self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
+ self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
+ self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
- url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
- u'trailer.mp4')
+ url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
+ 'trailer.mp4')
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
+ self.assertEqual(parse_duration('3h 11m 53s'), 11513)
+ self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
+ self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
+ self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
def test_fix_xml_ampersands(self):
self.assertEqual(
for i in range(firstid, upto):
yield i
- pl = PagedList(get_page, pagesize)
+ pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
+ iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
+ got = iapl.getslice(*sliceargs)
+ self.assertEqual(got, expected)
+
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (20, 99), [])
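Both paged-list classes expose the same getslice interface exercised by testPL; they differ only in construction (OnDemandPagedList does not need to know the total size up front, while InAdvancePagedList takes an explicit page count). A minimal sketch, with the constructor arguments inferred from the test above:

    from youtube_dl.utils import OnDemandPagedList, InAdvancePagedList

    def fetch_page(pagenum):
        # would normally fetch one page of results from the network
        start = pagenum * 10
        for i in range(start, start + 10):
            yield i

    ondemand = OnDemandPagedList(fetch_page, 10)       # total size unknown
    inadvance = InAdvancePagedList(fetch_page, 5, 10)  # 5 pages of 10, known up front
    assert ondemand.getslice(0, 3) == inadvance.getslice(0, 3) == [0, 1, 2]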
def test_struct_unpack(self):
- self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
+ self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
def test_read_batch_urls(self):
- f = io.StringIO(u'''\xef\xbb\xbf foo
+ f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
- self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
+ self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
+ self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
- def test_uppercase_escpae(self):
- self.assertEqual(uppercase_escape(u'aä'), u'aä')
- self.assertEqual(uppercase_escape(u'\\U0001d550'), u'𝕐')
+ def test_uppercase_escape(self):
+ self.assertEqual(uppercase_escape('aä'), 'aä')
+ self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
+
+ def test_limit_length(self):
+ self.assertEqual(limit_length(None, 12), None)
+ self.assertEqual(limit_length('foo', 12), 'foo')
+ self.assertTrue(
+ limit_length('foo bar baz asd', 12).startswith('foo bar'))
+ self.assertTrue('...' in limit_length('foo bar baz asd', 12))
+
+ def test_escape_rfc3986(self):
+ reserved = "!*'();:@&=+$,/?#[]"
+ unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
+ self.assertEqual(escape_rfc3986(reserved), reserved)
+ self.assertEqual(escape_rfc3986(unreserved), unreserved)
+ self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
+ self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
+ self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
+ self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
+
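These expectations pin down escape_rfc3986: reserved and unreserved characters pass through, everything else is percent-encoded, and existing %XX sequences are left alone. Presumably it is a thin wrapper around urllib quoting with the reserved set marked safe; a sketch, not the actual youtube_dl.utils source:

    try:
        from urllib.parse import quote  # Python 3
    except ImportError:
        from urllib import quote        # Python 2

    def escape_rfc3986_sketch(s):
        # '%' stays in the safe set so already-encoded sequences survive
        return quote(s.encode('utf-8'), safe=b"%/;:@&=+$,!~*'()?#[]")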
+ def test_escape_url(self):
+ self.assertEqual(
+ escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
+ 'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
+ )
+ self.assertEqual(
+ escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
+ 'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
+ )
+ self.assertEqual(
+ escape_url('http://тест.рф/фрагмент'),
+ 'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
+ )
+ self.assertEqual(
+ escape_url('http://тест.рф/абв?абв=абв#абв'),
+ 'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
+ )
+ self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
+
+ def test_js_to_json_realworld(self):
+ inp = '''{
+ 'clip':{'provider':'pseudo'}
+ }'''
+ self.assertEqual(js_to_json(inp), '''{
+ "clip":{"provider":"pseudo"}
+ }''')
+ json.loads(js_to_json(inp))
+
+ inp = '''{
+ 'playlist':[{'controls':{'all':null}}]
+ }'''
+ self.assertEqual(js_to_json(inp), '''{
+ "playlist":[{"controls":{"all":null}}]
+ }''')
+
+ def test_js_to_json_edgecases(self):
+ on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
+ self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
+
+ on = js_to_json('{"abc": true}')
+ self.assertEqual(json.loads(on), {'abc': True})
+
+ def test_compat_getenv(self):
+ test_str = 'тест'
+ os.environ['YOUTUBE-DL-TEST'] = (test_str if sys.version_info >= (3, 0)
+ else test_str.encode(get_filesystem_encoding()))
+ self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+ def test_compat_expanduser(self):
+ test_str = r'C:\Documents and Settings\тест\Application Data'
+ os.environ['HOME'] = (test_str if sys.version_info >= (3, 0)
+ else test_str.encode(get_filesystem_encoding()))
+ self.assertEqual(compat_expanduser('~'), test_str)
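Both compat helpers exist to paper over the bytes-vs-text environment on Python 2; presumably compat_getenv decodes with the filesystem encoding there and is a pass-through on Python 3. A sketch of that idea (an assumption, not the real source):

    import os
    import sys
    from youtube_dl.utils import get_filesystem_encoding

    def compat_getenv_sketch(key, default=None):
        env = os.getenv(key, default)
        if env is not None and sys.version_info < (3, 0):
            # Python 2 hands back bytes; decode them to text
            env = env.decode(get_filesystem_encoding())
        return env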
if __name__ == '__main__':
unittest.main()
from youtube_dl.extractor import (
- YoutubeUserIE,
YoutubePlaylistIE,
YoutubeIE,
YoutubeChannelIE,
"""Make sure the info has '_type' set to 'playlist'"""
self.assertEqual(info['_type'], 'playlist')
- def test_youtube_playlist(self):
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'ytdl test PL')
- ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
- self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
-
def test_youtube_playlist_noplaylist(self):
dl = FakeYDL()
dl.params['noplaylist'] = True
result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(result['_type'], 'url')
self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
-
- def test_issue_673(self):
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('PLBB231211A4F62143')
- self.assertTrue(len(result['entries']) > 25)
-
- def test_youtube_playlist_long(self):
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
- self.assertIsPlaylist(result)
- self.assertTrue(len(result['entries']) >= 799)
-
- def test_youtube_playlist_with_deleted(self):
- #651
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
- ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
- self.assertFalse('pElCt5oNDuI' in ytie_results)
- self.assertFalse('KdPEApIVdWM' in ytie_results)
-
- def test_youtube_playlist_empty(self):
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
- self.assertIsPlaylist(result)
- self.assertEqual(len(result['entries']), 0)
-
+
def test_youtube_course(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
self.assertEqual(len(entries), 25)
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
- def test_youtube_channel(self):
- dl = FakeYDL()
- ie = YoutubeChannelIE(dl)
- #test paginated channel
- result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
- self.assertTrue(len(result['entries']) > 90)
- #test autogenerated channel
- result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
- self.assertTrue(len(result['entries']) >= 18)
-
- def test_youtube_user(self):
- dl = FakeYDL()
- ie = YoutubeUserIE(dl)
- result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
- self.assertTrue(len(result['entries']) >= 320)
-
- def test_youtube_safe_search(self):
- dl = FakeYDL()
- ie = YoutubePlaylistIE(dl)
- result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
- self.assertEqual(len(result['entries']), 2)
-
- def test_youtube_show(self):
- dl = FakeYDL()
- ie = YoutubeShowIE(dl)
- result = ie.extract('http://www.youtube.com/show/airdisasters')
- self.assertTrue(len(result) >= 3)
-
def test_youtube_mix(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
entries = result['entries']
self.assertEqual(len(entries), 100)
- def test_youtube_toplist(self):
- dl = FakeYDL()
- ie = YoutubeTopListIE(dl)
- result = ie.extract('yttoplist:music:Trending')
- entries = result['entries']
- self.assertTrue(len(entries) >= 5)
-
- def test_youtube_search_url(self):
- dl = FakeYDL()
- ie = YoutubeSearchURLIE(dl)
- result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
- entries = result['entries']
- self.assertIsPlaylist(result)
- self.assertEqual(result['title'], 'youtube-dl test video')
- self.assertTrue(len(entries) >= 5)
-
if __name__ == '__main__':
unittest.main()
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
- (
- 'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
- 'swf',
- 86,
- 'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
- ),
- (
- 'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
- 'swf',
- 'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
- '9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
- ),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ configuration\ in\ ~/.config/youtube\-dl.conf
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (%APPDATA%/youtube\-dl/config.txt\ on
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Windows)
+\-\-flat\-playlist\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Do\ not\ extract\ the\ videos\ of\ a\ playlist,
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ only\ list\ them.
\f[]
.fi
.SS Video Selection:
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ downloaded\ videos\ in\ it.
\-\-include\-ads\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Download\ advertisements\ as\ well
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (experimental)
-\-\-youtube\-include\-dash\-manifest\ \ Try\ to\ download\ the\ DASH\ manifest\ on
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ YouTube\ videos\ (experimental)
\f[]
.fi
.SS Download Options:
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ downloads\ if\ possible.
\-\-no\-continue\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ resume\ partially\ downloaded\ files
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (restart\ from\ beginning)
-\-\-no\-part\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ use\ .part\ files
+\-\-no\-part\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ use\ .part\ files\ \-\ write\ directly
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ into\ output\ file
\-\-no\-mtime\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ use\ the\ Last\-modified\ header\ to\ set
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ the\ file\ modification\ time
\-\-write\-description\ \ \ \ \ \ \ \ \ \ \ \ \ \ write\ video\ description\ to\ a\ .description
\-j,\ \-\-dump\-json\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ simulate,\ quiet\ but\ print\ JSON\ information.
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ See\ \-\-output\ for\ a\ description\ of\ available
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ keys.
+\-J,\ \-\-dump\-single\-json\ \ \ \ \ \ \ \ \ \ \ simulate,\ quiet\ but\ print\ JSON\ information
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ for\ each\ command\-line\ argument.\ If\ the\ URL
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ refers\ to\ a\ playlist,\ dump\ the\ whole
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ playlist\ information\ in\ a\ single\ line.
\-\-newline\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ output\ progress\ bar\ as\ new\ lines
\-\-no\-progress\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ do\ not\ print\ progress\ bar
\-\-console\-title\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ display\ progress\ in\ console\ titlebar
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ information\ about\ the\ video.\ (Currently
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ supported\ only\ for\ YouTube)
\-\-user\-agent\ UA\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ user\ agent
-\-\-referer\ REF\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ referer,\ use\ if\ the\ video
+\-\-referer\ URL\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ specify\ a\ custom\ referer,\ use\ if\ the\ video
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ access\ is\ restricted\ to\ one\ domain
\-\-add\-header\ FIELD:VALUE\ \ \ \ \ \ \ \ \ specify\ a\ custom\ HTTP\ header\ and\ its\ value,
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ separated\ by\ a\ colon\ \[aq]:\[aq].\ You\ can\ use\ this
.nf
\f[C]
\-f,\ \-\-format\ FORMAT\ \ \ \ \ \ \ \ \ \ \ \ \ \ video\ format\ code,\ specify\ the\ order\ of
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ preference\ using\ slashes:\ "\-f\ 22/17/18".
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "\-f\ mp4"\ and\ "\-f\ flv"\ are\ also\ supported.
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ You\ can\ also\ use\ the\ special\ names\ "best",
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "bestvideo",\ "bestaudio",\ "worst",
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "worstvideo"\ and\ "worstaudio".\ By\ default,
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ youtube\-dl\ will\ pick\ the\ best\ quality.
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ preference\ using\ slashes:\ \-f\ 22/17/18\ .\ \ \-f
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ mp4\ ,\ \-f\ m4a\ and\ \ \-f\ flv\ \ are\ also
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ supported.\ You\ can\ also\ use\ the\ special
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ names\ "best",\ "bestvideo",\ "bestaudio",
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "worst",\ "worstvideo"\ and\ "worstaudio".\ By
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ default,\ youtube\-dl\ will\ pick\ the\ best
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ quality.\ Use\ commas\ to\ download\ multiple
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ audio\ formats,\ such\ as\ \ \-f
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 136/137/mp4/bestvideo,140/m4a/bestaudio
\-\-all\-formats\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ download\ all\ available\ video\ formats
\-\-prefer\-free\-formats\ \ \ \ \ \ \ \ \ \ \ \ prefer\ free\ video\ formats\ unless\ a\ specific
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ one\ is\ requested
\-\-max\-quality\ FORMAT\ \ \ \ \ \ \ \ \ \ \ \ \ highest\ quality\ format\ to\ download
\-F,\ \-\-list\-formats\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ list\ all\ available\ formats
+\-\-youtube\-skip\-dash\-manifest\ \ \ \ \ Do\ not\ download\ the\ DASH\ manifest\ on
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ YouTube\ videos
\f[]
.fi
.SS Subtitle Options:
.IP
.nf
\f[C]
-\-u,\ \-\-username\ USERNAME\ \ \ \ \ \ \ \ \ \ account\ username
+\-u,\ \-\-username\ USERNAME\ \ \ \ \ \ \ \ \ \ login\ with\ this\ account\ ID
\-p,\ \-\-password\ PASSWORD\ \ \ \ \ \ \ \ \ \ account\ password
+\-2,\ \-\-twofactor\ TWOFACTOR\ \ \ \ \ \ \ \ two\-factor\ auth\ code
\-n,\ \-\-netrc\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ .netrc\ authentication\ data
\-\-video\-password\ PASSWORD\ \ \ \ \ \ \ \ video\ password\ (vimeo,\ smotri)
\f[]
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (requires\ ffmpeg\ or\ avconv\ and\ ffprobe\ or
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ avprobe)
\-\-audio\-format\ FORMAT\ \ \ \ \ \ \ \ \ \ \ \ "best",\ "aac",\ "vorbis",\ "mp3",\ "m4a",
-\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "opus",\ or\ "wav";\ best\ by\ default
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ "opus",\ or\ "wav";\ "best"\ by\ default
\-\-audio\-quality\ QUALITY\ \ \ \ \ \ \ \ \ \ ffmpeg/avconv\ audio\ quality\ specification,
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ insert\ a\ value\ between\ 0\ (better)\ and\ 9
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (worse)\ for\ VBR\ or\ a\ specific\ bitrate\ like
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ postprocessors\ (default)
\-\-prefer\-ffmpeg\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Prefer\ ffmpeg\ over\ avconv\ for\ running\ the
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ postprocessors
+\-\-exec\ CMD\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Execute\ a\ command\ on\ the\ file\ after
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ downloading,\ similar\ to\ find\[aq]s\ \-exec
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ syntax.\ Example:\ \-\-exec\ \[aq]adb\ push\ {}
+\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ /sdcard/Music/\ &&\ rm\ {}\[aq]
\f[]
.fi
.SH CONFIGURATION
\f[]
.fi
.SH FAQ
+.SS How do I update youtube\-dl?
+.PP
+If you\[aq]ve followed our manual installation
+instructions (http://rg3.github.io/youtube-dl/download.html), you can
+simply run \f[C]youtube\-dl\ \-U\f[] (or, on Linux,
+\f[C]sudo\ youtube\-dl\ \-U\f[]).
+.PP
+If you have used pip, a simple
+\f[C]sudo\ pip\ install\ \-U\ youtube\-dl\f[] is sufficient to update.
+.PP
+If you have installed youtube\-dl using a package manager like
+\f[I]apt\-get\f[] or \f[I]yum\f[], use the standard system update
+mechanism to update.
+Note that distribution packages are often outdated.
+As a rule of thumb, youtube\-dl releases at least once a month, and
+often weekly or even daily.
+Simply go to http://yt\-dl.org/ to find out the current version.
+Unfortunately, there is nothing we youtube\-dl developers can do if your
+distribution serves a really outdated version.
+You can (and should) complain to your distribution in their bugtracker
+or support forum.
+.PP
+As a last resort, you can also uninstall the version installed by your
+package manager and follow our manual installation instructions.
+For that, remove the distribution\[aq]s package, with a line like
+.IP
+.nf
+\f[C]
+sudo\ apt\-get\ remove\ \-y\ youtube\-dl
+\f[]
+.fi
+.PP
+Afterwards, simply follow our manual installation
+instructions (http://rg3.github.io/youtube-dl/download.html):
+.IP
+.nf
+\f[C]
+sudo\ wget\ https://yt\-dl.org/latest/youtube\-dl\ \-O\ /usr/local/bin/youtube\-dl
+sudo\ chmod\ a+x\ /usr/local/bin/youtube\-dl
+hash\ \-r
+\f[]
+.fi
+.PP
+Again, from then on you\[aq]ll be able to update with
+\f[C]sudo\ youtube\-dl\ \-U\f[].
+.SS I\[aq]m getting an error
+\f[C]Unable\ to\ extract\ OpenGraph\ title\f[] on YouTube playlists
+.PP
+YouTube changed their playlist format in March 2014 and again later, so
+you\[aq]ll need at least youtube\-dl 2014.07.25 to download all YouTube
+videos.
+.PP
+If you have installed youtube\-dl with a package manager, pip, setup.py
+or a tarball, please use that to update.
+Note that Ubuntu packages do not seem to get updated anymore.
+Since we are not affiliated with Ubuntu, there is little we can do.
+Feel free to report bugs to the Ubuntu packaging guys \- all they have
+to do is update the package to a somewhat recent version.
+See above for a way to update.
+.SS Do I always have to pass in \f[C]\-\-max\-quality\ FORMAT\f[], or
+\f[C]\-citw\f[]?
+.PP
+By default, youtube\-dl intends to have the best options (incidentally,
+if you have a convincing case that these should be different, please
+file an issue where you explain that (https://yt-dl.org/bug)).
+Therefore, it is unnecessary and sometimes harmful to copy long option
+strings from webpages.
+In particular, \f[C]\-\-max\-quality\f[] \f[I]limits\f[] the video
+quality (so if you want the best quality, do NOT pass it in), and the
+only option out of \f[C]\-citw\f[] that is regularly useful is
+\f[C]\-i\f[].
.SS Can you please put the \-b option back?
.PP
Most people asking this question are not aware that youtube\-dl now
#\ coding:\ utf\-8
from\ __future__\ import\ unicode_literals
-import\ re
-
from\ .common\ import\ InfoExtractor
\ \ \ \ _VALID_URL\ =\ r\[aq]https?://(?:www\\.)?yourextractor\\.com/watch/(?P<id>[0\-9]+)\[aq]
\ \ \ \ _TEST\ =\ {
\ \ \ \ \ \ \ \ \[aq]url\[aq]:\ \[aq]http://yourextractor.com/watch/42\[aq],
-\ \ \ \ \ \ \ \ \[aq]md5\[aq]:\ \[aq]TODO:\ md5\ sum\ of\ the\ first\ 10KiB\ of\ the\ video\ file\[aq],
+\ \ \ \ \ \ \ \ \[aq]md5\[aq]:\ \[aq]TODO:\ md5\ sum\ of\ the\ first\ 10241\ bytes\ of\ the\ video\ file\ (use\ \-\-test)\[aq],
\ \ \ \ \ \ \ \ \[aq]info_dict\[aq]:\ {
\ \ \ \ \ \ \ \ \ \ \ \ \[aq]id\[aq]:\ \[aq]42\[aq],
\ \ \ \ \ \ \ \ \ \ \ \ \[aq]ext\[aq]:\ \[aq]mp4\[aq],
\ \ \ \ \ \ \ \ \ \ \ \ \[aq]title\[aq]:\ \[aq]Video\ title\ goes\ here\[aq],
+\ \ \ \ \ \ \ \ \ \ \ \ \[aq]thumbnail\[aq]:\ \[aq]re:^https?://.*\\.jpg$\[aq],
\ \ \ \ \ \ \ \ \ \ \ \ #\ TODO\ more\ properties,\ either\ as:
\ \ \ \ \ \ \ \ \ \ \ \ #\ *\ A\ value
\ \ \ \ \ \ \ \ \ \ \ \ #\ *\ MD5\ checksum;\ start\ the\ string\ with\ md5:
\ \ \ \ }
\ \ \ \ def\ _real_extract(self,\ url):
-\ \ \ \ \ \ \ \ mobj\ =\ re.match(self._VALID_URL,\ url)
-\ \ \ \ \ \ \ \ video_id\ =\ mobj.group(\[aq]id\[aq])
+\ \ \ \ \ \ \ \ video_id\ =\ self._match_id(url)
\ \ \ \ \ \ \ \ #\ TODO\ more\ code\ goes\ here,\ for\ example\ ...
\ \ \ \ \ \ \ \ webpage\ =\ self._download_webpage(url,\ video_id)
\f[C]python\ test/test_download.py\ TestDownload.test_YourExtractor\f[].
This \f[I]should fail\f[] at first, but you can continually re\-run it
until you\[aq]re done.
+If you decide to add more than one test, then rename \f[C]_TEST\f[] to
+\f[C]_TESTS\f[] and make it into a list of dictionaries.
+The tests will then be named
+\f[C]TestDownload.test_YourExtractor\f[],
+\f[C]TestDownload.test_YourExtractor_1\f[],
+\f[C]TestDownload.test_YourExtractor_2\f[], etc.
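For instance, a two-entry sketch (the second URL and title are invented for illustration):

    _TESTS = [{
        'url': 'http://yourextractor.com/watch/42',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        'url': 'http://yourextractor.com/watch/43',
        'info_dict': {
            'id': '43',
            'ext': 'mp4',
            'title': 'Another video title',
        },
    }]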
.IP " 7." 4
Have a look at
\f[C]youtube_dl/extractor/common.py\f[] (https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py)
We\[aq]ll then review and merge it.
.PP
In any case, thank you very much for your contributions!
+.SH EMBEDDING YOUTUBE\-DL
+.PP
+youtube\-dl makes the best effort to be a good command\-line program,
+and thus should be callable from any programming language.
+If you encounter any problems parsing its output, feel free to create a
+report (https://github.com/rg3/youtube-dl/issues/new).
+.PP
+From a Python program, you can embed youtube\-dl in a more powerful
+fashion, like this:
+.IP
+.nf
+\f[C]
+import\ youtube_dl
+
+ydl_opts\ =\ {}
+with\ youtube_dl.YoutubeDL(ydl_opts)\ as\ ydl:
+\ \ \ \ ydl.download([\[aq]http://www.youtube.com/watch?v=BaW_jenozKc\[aq]])
+\f[]
+.fi
+.PP
+Most likely, you\[aq]ll want to use various options.
+For a list of what can be done, have a look at
+youtube_dl/YoutubeDL.py (https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69).
+For a start, if you want to intercept youtube\-dl\[aq]s output, set a
+\f[C]logger\f[] object.
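For instance, to intercept the messages youtube-dl would otherwise print, pass an object with debug/warning/error methods under the 'logger' key of the params dict (a sketch; the MyLogger class and what it does with each message are illustrative, not part of the API beyond the 'logger' option documented in YoutubeDL.py):

    import youtube_dl

    class MyLogger(object):
        def debug(self, msg):
            pass  # swallow routine progress chatter

        def warning(self, msg):
            pass

        def error(self, msg):
            print(msg)

    ydl_opts = {'logger': MyLogger()}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])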
.SH BUGS
.PP
Bugs and suggestions should be reported at:
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
- opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --proxy --socket-timeout --default-search --ignore-config --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --include-ads --youtube-include-dash-manifest --rate-limit --retries --buffer-size --no-resize-buffer --test --batch-file --id --auto-number --output --autonumber-size --restrict-filenames --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --format --all-formats --prefer-free-formats --max-quality --list-formats --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg"
+ opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --proxy --socket-timeout --default-search --ignore-config --flat-playlist --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --batch-file --id --auto-number --output --autonumber-size --restrict-filenames --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --format --all-formats --prefer-free-formats --max-quality --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg --exec"
keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
diropts="--cache-dir"
--- /dev/null
+
+complete --command youtube-dl --long-option help --short-option h --description 'print this help text and exit'
+complete --command youtube-dl --long-option version --description 'print program version and exit'
+complete --command youtube-dl --long-option update --short-option U --description 'update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)'
+complete --command youtube-dl --long-option ignore-errors --short-option i --description 'continue on download errors, for example to skip unavailable videos in a playlist'
+complete --command youtube-dl --long-option abort-on-error --description 'Abort downloading of further videos (in the playlist or the command line) if an error occurs'
+complete --command youtube-dl --long-option dump-user-agent --description 'display the current browser identification'
+complete --command youtube-dl --long-option list-extractors --description 'List all supported extractors and the URLs they would handle'
+complete --command youtube-dl --long-option extractor-descriptions --description 'Output descriptions of all supported extractors'
+complete --command youtube-dl --long-option proxy --description 'Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection'
+complete --command youtube-dl --long-option socket-timeout --description 'Time to wait before giving up, in seconds'
+complete --command youtube-dl --long-option default-search --description 'Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.'
+complete --command youtube-dl --long-option ignore-config --description 'Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)'
+complete --command youtube-dl --long-option flat-playlist --description 'Do not extract the videos of a playlist, only list them.'
+complete --command youtube-dl --long-option playlist-start --description 'playlist video to start at (default is %default)'
+complete --command youtube-dl --long-option playlist-end --description 'playlist video to end at (default is last)'
+complete --command youtube-dl --long-option match-title --description 'download only matching titles (regex or caseless sub-string)'
+complete --command youtube-dl --long-option reject-title --description 'skip download for matching titles (regex or caseless sub-string)'
+complete --command youtube-dl --long-option max-downloads --description 'Abort after downloading NUMBER files'
+complete --command youtube-dl --long-option min-filesize --description 'Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)'
+complete --command youtube-dl --long-option max-filesize --description 'Do not download any videos larger than SIZE (e.g. 50k or 44.6m)'
+complete --command youtube-dl --long-option date --description 'download only videos uploaded in this date'
+complete --command youtube-dl --long-option datebefore --description 'download only videos uploaded on or before this date (i.e. inclusive)'
+complete --command youtube-dl --long-option dateafter --description 'download only videos uploaded on or after this date (i.e. inclusive)'
+complete --command youtube-dl --long-option min-views --description 'Do not download any videos with less than COUNT views'
+complete --command youtube-dl --long-option max-views --description 'Do not download any videos with more than COUNT views'
+complete --command youtube-dl --long-option no-playlist --description 'download only the currently playing video'
+complete --command youtube-dl --long-option age-limit --description 'download only videos suitable for the given age'
+complete --command youtube-dl --long-option download-archive --description 'Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.' --require-parameter
+complete --command youtube-dl --long-option include-ads --description 'Download advertisements as well (experimental)'
+complete --command youtube-dl --long-option rate-limit --short-option r --description 'maximum download rate in bytes per second (e.g. 50K or 4.2M)'
+complete --command youtube-dl --long-option retries --short-option R --description 'number of retries (default is %default)'
+complete --command youtube-dl --long-option buffer-size --description 'size of download buffer (e.g. 1024 or 16K) (default is %default)'
+complete --command youtube-dl --long-option no-resize-buffer --description 'do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.'
+complete --command youtube-dl --long-option test
+complete --command youtube-dl --long-option batch-file --short-option a --description 'file containing URLs to download ('"'"'-'"'"' for stdin)' --require-parameter
+complete --command youtube-dl --long-option id --description 'use only video ID in file name'
+complete --command youtube-dl --long-option auto-number --short-option A --description 'number downloaded files starting from 00000'
+complete --command youtube-dl --long-option output --short-option o --description 'output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like Youtube'"'"'s itags: "137"), %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id, %(playlist)s for the playlist the video is in, %(playlist_index)s for the position in the playlist and %% for a literal percent. %(height)s and %(width)s for the width and height of the video format. %(resolution)s for a textual description of the resolution of the video format. Use - to output to stdout. Can also be used to download to a different directory, for example with -o '"'"'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s'"'"' .'
+complete --command youtube-dl --long-option autonumber-size --description 'Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given'
+complete --command youtube-dl --long-option restrict-filenames --description 'Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames'
+complete --command youtube-dl --long-option title --short-option t --description '[deprecated] use title in file name (default)'
+complete --command youtube-dl --long-option literal --short-option l --description '[deprecated] alias of --title'
+complete --command youtube-dl --long-option no-overwrites --short-option w --description 'do not overwrite files'
+complete --command youtube-dl --long-option continue --short-option c --description 'force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.'
+complete --command youtube-dl --long-option no-continue --description 'do not resume partially downloaded files (restart from beginning)'
+complete --command youtube-dl --long-option no-part --description 'do not use .part files - write directly into output file'
+complete --command youtube-dl --long-option no-mtime --description 'do not use the Last-modified header to set the file modification time'
+complete --command youtube-dl --long-option write-description --description 'write video description to a .description file'
+complete --command youtube-dl --long-option write-info-json --description 'write video metadata to a .info.json file'
+complete --command youtube-dl --long-option write-annotations --description 'write video annotations to a .annotation file'
+complete --command youtube-dl --long-option write-thumbnail --description 'write thumbnail image to disk'
+complete --command youtube-dl --long-option load-info --description 'json file containing the video information (created with the "--write-json" option)' --require-parameter
+complete --command youtube-dl --long-option cookies --description 'file to read cookies from and dump cookie jar in' --require-parameter
+complete --command youtube-dl --long-option cache-dir --description 'Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.'
+complete --command youtube-dl --long-option no-cache-dir --description 'Disable filesystem caching'
+complete --command youtube-dl --long-option rm-cache-dir --description 'Delete all filesystem cache files'
+complete --command youtube-dl --long-option quiet --short-option q --description 'activates quiet mode'
+complete --command youtube-dl --long-option no-warnings --description 'Ignore warnings'
+complete --command youtube-dl --long-option simulate --short-option s --description 'do not download the video and do not write anything to disk'
+complete --command youtube-dl --long-option skip-download --description 'do not download the video'
+complete --command youtube-dl --long-option get-url --short-option g --description 'simulate, quiet but print URL'
+complete --command youtube-dl --long-option get-title --short-option e --description 'simulate, quiet but print title'
+complete --command youtube-dl --long-option get-id --description 'simulate, quiet but print id'
+complete --command youtube-dl --long-option get-thumbnail --description 'simulate, quiet but print thumbnail URL'
+complete --command youtube-dl --long-option get-description --description 'simulate, quiet but print video description'
+complete --command youtube-dl --long-option get-duration --description 'simulate, quiet but print video length'
+complete --command youtube-dl --long-option get-filename --description 'simulate, quiet but print output filename'
+complete --command youtube-dl --long-option get-format --description 'simulate, quiet but print output format'
+complete --command youtube-dl --long-option dump-json --short-option j --description 'simulate, quiet but print JSON information. See --output for a description of available keys.'
+complete --command youtube-dl --long-option dump-single-json --short-option J --description 'simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.'
+complete --command youtube-dl --long-option newline --description 'output progress bar as new lines'
+complete --command youtube-dl --long-option no-progress --description 'do not print progress bar'
+complete --command youtube-dl --long-option console-title --description 'display progress in console titlebar'
+complete --command youtube-dl --long-option verbose --short-option v --description 'print various debugging information'
+complete --command youtube-dl --long-option dump-intermediate-pages --description 'print downloaded pages to debug problems (very verbose)'
+complete --command youtube-dl --long-option write-pages --description 'Write downloaded intermediary pages to files in the current directory to debug problems'
+complete --command youtube-dl --long-option youtube-print-sig-code
+complete --command youtube-dl --long-option print-traffic --description 'Display sent and read HTTP traffic'
+complete --command youtube-dl --long-option encoding --description 'Force the specified encoding (experimental)'
+complete --command youtube-dl --long-option no-check-certificate --description 'Suppress HTTPS certificate validation.'
+complete --command youtube-dl --long-option prefer-insecure --description 'Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)'
+complete --command youtube-dl --long-option user-agent --description 'specify a custom user agent'
+complete --command youtube-dl --long-option referer --description 'specify a custom referer, use if the video access is restricted to one domain'
+complete --command youtube-dl --long-option add-header --description 'specify a custom HTTP header and its value, separated by a colon '"'"':'"'"'. You can use this option multiple times'
+complete --command youtube-dl --long-option bidi-workaround --description 'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH'
+complete --command youtube-dl --long-option format --short-option f --description 'video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio'
+complete --command youtube-dl --long-option all-formats --description 'download all available video formats'
+complete --command youtube-dl --long-option prefer-free-formats --description 'prefer free video formats unless a specific one is requested'
+complete --command youtube-dl --long-option max-quality --description 'highest quality format to download'
+complete --command youtube-dl --long-option list-formats --short-option F --description 'list all available formats'
+complete --command youtube-dl --long-option youtube-include-dash-manifest
+complete --command youtube-dl --long-option youtube-skip-dash-manifest --description 'Do not download the DASH manifest on YouTube videos'
+complete --command youtube-dl --long-option write-sub --description 'write subtitle file'
+complete --command youtube-dl --long-option write-auto-sub --description 'write automatic subtitle file (youtube only)'
+complete --command youtube-dl --long-option all-subs --description 'downloads all the available subtitles of the video'
+complete --command youtube-dl --long-option list-subs --description 'lists all available subtitles for the video'
+complete --command youtube-dl --long-option sub-format --description 'subtitle format (default=srt) ([sbv/vtt] youtube only)'
+complete --command youtube-dl --long-option sub-lang --description 'languages of the subtitles to download (optional) separated by commas, use IETF language tags like '"'"'en,pt'"'"''
+complete --command youtube-dl --long-option username --short-option u --description 'login with this account ID'
+complete --command youtube-dl --long-option password --short-option p --description 'account password'
+complete --command youtube-dl --long-option twofactor --short-option 2 --description 'two-factor auth code'
+complete --command youtube-dl --long-option netrc --short-option n --description 'use .netrc authentication data'
+complete --command youtube-dl --long-option video-password --description 'video password (vimeo, smotri)'
+complete --command youtube-dl --long-option extract-audio --short-option x --description 'convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)'
+complete --command youtube-dl --long-option audio-format --description '"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default'
+complete --command youtube-dl --long-option audio-quality --description 'ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)'
+complete --command youtube-dl --long-option recode-video --description 'Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)' --arguments 'mp4 flv ogg webm mkv' --exclusive
+complete --command youtube-dl --long-option keep-video --short-option k --description 'keeps the video file on disk after the post-processing; the video is erased by default'
+complete --command youtube-dl --long-option no-post-overwrites --description 'do not overwrite post-processed files; the post-processed files are overwritten by default'
+complete --command youtube-dl --long-option embed-subs --description 'embed subtitles in the video (only for mp4 videos)'
+complete --command youtube-dl --long-option embed-thumbnail --description 'embed thumbnail in the audio as cover art'
+complete --command youtube-dl --long-option add-metadata --description 'write metadata to the video file'
+complete --command youtube-dl --long-option xattrs --description 'write metadata to the video file'"'"'s xattrs (using dublin core and xdg standards)'
+complete --command youtube-dl --long-option prefer-avconv --description 'Prefer avconv over ffmpeg for running the postprocessors (default)'
+complete --command youtube-dl --long-option prefer-ffmpeg --description 'Prefer ffmpeg over avconv for running the postprocessors'
+complete --command youtube-dl --long-option exec --description 'Execute a command on the file after downloading, similar to find'"'"'s -exec syntax. Example: --exec '"'"'adb push {} /sdcard/Music/ && rm {}'"'"''
+
+
+complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
--- /dev/null
+#compdef youtube-dl
+
+__youtube_dl() {
+ local curcontext="$curcontext" fileopts diropts cur prev
+ typeset -A opt_args
+ fileopts="--download-archive|-a|--batch-file|--load-info|--cookies"
+ diropts="--cache-dir"
+ cur=$words[CURRENT]
+ case $cur in
+ :)
+ _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
+ ;;
+ *)
+ prev=$words[CURRENT-1]
+ if [[ ${prev} =~ ${fileopts} ]]; then
+ _path_files
+ elif [[ ${prev} =~ ${diropts} ]]; then
+ _path_files -/
+ elif [[ ${prev} == "--recode-video" ]]; then
+ _arguments '*: :(mp4 flv ogg webm mkv)'
+ else
+ _arguments '*: :(--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --proxy --socket-timeout --default-search --ignore-config --flat-playlist --playlist-start --playlist-end --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --no-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --batch-file --id --auto-number --output --autonumber-size --restrict-filenames --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --write-thumbnail --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --newline --no-progress --console-title --verbose --dump-intermediate-pages --write-pages --youtube-print-sig-code --print-traffic --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --format --all-formats --prefer-free-formats --max-quality --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --xattrs --prefer-avconv --prefer-ffmpeg --exec)'
+ fi
+ ;;
+ esac
+}
+
+__youtube_dl
\ No newline at end of file
from .utils import (
compat_cookiejar,
+ compat_expanduser,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_request,
+ escape_url,
ContentTooShortError,
date_from_str,
DateRange,
YoutubeDLHandler,
prepend_extension,
)
+from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
-from .postprocessor import FFmpegMergerPP
+from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
from .version import __version__
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
+ dump_single_json: Force printing the info_dict of the whole playlist
+ (or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
- None to disable filesystem cache.
+ False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
+ extract_flat: Do not resolve URLs, return the immediate result.
+ Pass in 'in_playlist' to only show this behavior for
+ playlist items.
The following parameters are not used by YoutubeDL itself, they are used by
the FileDownloader:
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
otherwise prefer avconv.
+ exec_cmd: Arbitrary command to run after downloading
"""
params = None
_num_downloads = None
_screen_file = None
- def __init__(self, params=None):
+ def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = params
+ self.cache = Cache(self)
if params.get('bidi_workaround', False):
try:
if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
- and not params['restrictfilenames']):
+ and not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
- 'cannot encode all charactes. '
+ 'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
self._setup_opener()
+ if auto_init:
+ self.print_debug_header()
+ self.add_default_info_extractors()
+
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
autonumber_templ = '%0' + str(autonumber_size) + 'd'
template_dict['autonumber'] = autonumber_templ % self._num_downloads
if template_dict.get('playlist_index') is not None:
- template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
+ template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
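As an aside (not part of the patch): the new '%0*d' formatting pads the playlist index to the width of the playlist length instead of a fixed five digits; a quick illustration with made-up numbers:

    n_entries, playlist_index = 130, 7
    print('%0*d' % (len(str(n_entries)), playlist_index))  # new behaviour -> '007'
    print('%05d' % playlist_index)                         # old behaviour -> '00007'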
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
- tmpl = os.path.expanduser(outtmpl)
+ tmpl = compat_expanduser(outtmpl)
filename = tmpl % template_dict
return filename
except ValueError as err:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
age_limit = self.params.get('age_limit')
if age_limit is not None:
- if age_limit < info_dict.get('age_limit', 0):
+ actual_age_limit = info_dict.get('age_limit')
+ if actual_age_limit is None:
+ actual_age_limit = 0
+ if age_limit < actual_age_limit:
return 'Skipping "' + title + '" because it is age restricted'
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
Returns the resolved ie_result.
"""
- result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
+ result_type = ie_result.get('_type', 'video')
+
+ if result_type in ('url', 'url_transparent'):
+ extract_flat = self.params.get('extract_flat', False)
+ if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
+ extract_flat is True):
+ if self.params.get('forcejson', False):
+ self.to_stdout(json.dumps(ie_result))
+ return ie_result
+
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
extra = {
+ 'n_entries': n_entries,
'playlist': playlist,
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
if video_formats:
return video_formats[0]
else:
- extensions = ['mp4', 'flv', 'webm', '3gp']
+ extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
if req_format in ('-1', 'all'):
formats_to_download = formats
else:
- # We can accept formats requested in the format: 34/5/best, we pick
- # the first that is available, starting from left
- req_formats = req_format.split('/')
- for rf in req_formats:
- if re.match(r'.+?\+.+?', rf) is not None:
- # Two formats have been requested like '137+139'
- format_1, format_2 = rf.split('+')
- formats_info = (self.select_format(format_1, formats),
- self.select_format(format_2, formats))
- if all(formats_info):
- selected_format = {
- 'requested_formats': formats_info,
- 'format': rf,
- 'ext': formats_info[0]['ext'],
- }
+ for rfstr in req_format.split(','):
+                # We can accept formats requested in the form '34/5/best'; we pick
+                # the first one that is available, scanning from the left
+ req_formats = rfstr.split('/')
+ for rf in req_formats:
+ if re.match(r'.+?\+.+?', rf) is not None:
+ # Two formats have been requested like '137+139'
+ format_1, format_2 = rf.split('+')
+ formats_info = (self.select_format(format_1, formats),
+ self.select_format(format_2, formats))
+ if all(formats_info):
+ selected_format = {
+ 'requested_formats': formats_info,
+ 'format': rf,
+ 'ext': formats_info[0]['ext'],
+ }
+ else:
+ selected_format = None
else:
- selected_format = None
- else:
- selected_format = self.select_format(rf, formats)
- if selected_format is not None:
- formats_to_download = [selected_format]
- break
+ selected_format = self.select_format(rf, formats)
+ if selected_format is not None:
+ formats_to_download.append(selected_format)
+ break
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
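To make the new comma handling concrete: each comma-separated group produces one download, within a group the slash-separated alternatives are tried left to right, and 'A+B' requests two formats to merge. An illustrative split (format codes made up):

    req_format = '137+139/18,140'
    groups = [rfstr.split('/') for rfstr in req_format.split(',')]
    print(groups)  # [['137+139', '18'], ['140']] -- one selected format per group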
if self.params.get('forcejson', False):
info_dict['_filename'] = filename
self.to_stdout(json.dumps(info_dict))
+ if self.params.get('dump_single_json', False):
+ info_dict['_filename'] = filename
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
downloaded = []
success = True
merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
- if not merger._get_executable():
+ if not merger._executable:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
for url in url_list:
try:
#It also downloads the videos
- self.extract_info(url)
+ res = self.extract_info(url)
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
+ else:
+ if self.params.get('dump_single_json', False):
+ self.to_stdout(json.dumps(res))
return self._download_retcode
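For illustration, a minimal sketch of how the new dump_single_json and extract_flat options might be used from embedding code; the URL and option values below are assumptions, not taken from the patch:

    from youtube_dl import YoutubeDL

    opts = {
        'dump_single_json': True,       # one JSON line for the whole playlist (or video)
        'extract_flat': 'in_playlist',  # do not resolve entries inside playlists
        'cachedir': False,              # False (not None) now disables the filesystem cache
        'simulate': True,
    }
    with YoutubeDL(opts) as ydl:
        ydl.download(['https://www.youtube.com/playlist?list=EXAMPLE'])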
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
+ if fdict.get('fps') is not None:
+ res += ', %sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
def urlopen(self, req):
""" Start an HTTP download """
+
+        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
+        # always respected by websites: some hand out URLs with non-percent-encoded
+        # non-ASCII characters (see telemb.py, ard.py [#3412]).
+        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991).
+        # To work around this issue we replace the request's original URL with a
+        # percent-encoded one.
+ req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
+ url = req if req_is_string else req.get_full_url()
+ url_escaped = escape_url(url)
+
+ # Substitute URL if any change after escaping
+ if url != url_escaped:
+ if req_is_string:
+ req = url_escaped
+ else:
+ req = compat_urllib_request.Request(
+ url_escaped, data=req.data, headers=req.headers,
+ origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
+
return self._opener.open(req, timeout=self._socket_timeout)
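A hedged illustration of the work-around above; the URL is invented and the exact output is whatever escape_url in youtube_dl.utils produces, the point being that the result is an all-ASCII, percent-encoded URL:

    # -*- coding: utf-8 -*-
    from youtube_dl.utils import escape_url

    url = 'http://www.example.com/münchen/video.mp4'  # non-ASCII path segment (illustrative)
    print(escape_url(url))
    # expected along the lines of 'http://www.example.com/m%C3%BCnchen/video.mp4'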
def print_debug_header(self):
sys.exc_clear()
except:
pass
- self._write_string('[debug] Python version %s - %s' %
- (platform.python_version(), platform_name()) + '\n')
+ self._write_string('[debug] Python version %s - %s\n' % (
+ platform.python_version(), platform_name()))
+
+ exe_versions = FFmpegPostProcessor.get_versions()
+ exe_str = ', '.join(
+ '%s %s' % (exe, v)
+ for exe, v in sorted(exe_versions.items())
+ if v
+ )
+ if not exe_str:
+ exe_str = 'none'
+ self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-__authors__ = (
- 'Ricardo Garcia Gonzalez',
- 'Danny Colligan',
- 'Benjamin Johnson',
- 'Vasyl\' Vavrychuk',
- 'Witold Baryluk',
- 'Paweł Paprota',
- 'Gergely Imreh',
- 'Rogério Brito',
- 'Philipp Hagemeister',
- 'Sören Schulze',
- 'Kevin Ngo',
- 'Ori Avtalion',
- 'shizeeg',
- 'Filippo Valsorda',
- 'Christian Albrecht',
- 'Dave Vasilevsky',
- 'Jaime Marquínez Ferrándiz',
- 'Jeff Crouse',
- 'Osama Khalid',
- 'Michael Walter',
- 'M. Yasoob Ullah Khalid',
- 'Julien Fraichard',
- 'Johny Mo Swag',
- 'Axel Noack',
- 'Albert Kim',
- 'Pierre Rudloff',
- 'Huarong Huo',
- 'Ismael Mejía',
- 'Steffan \'Ruirize\' James',
- 'Andras Elso',
- 'Jelle van der Waa',
- 'Marcin Cieślak',
- 'Anton Larionov',
- 'Takuya Tsuchida',
- 'Sergey M.',
- 'Michael Orlitzky',
- 'Chris Gahan',
- 'Saimadhav Heblikar',
- 'Mike Col',
- 'Oleg Prutz',
- 'pulpe',
- 'Andreas Schmitz',
- 'Michael Kaiser',
- 'Niklas Laxström',
- 'David Triendl',
- 'Anthony Weems',
- 'David Wagner',
- 'Juan C. Olivares',
- 'Mattias Harrysson',
- 'phaer',
- 'Sainyam Kapoor',
- 'Nicolas Évrard',
- 'Jason Normore',
- 'Hoje Lee',
- 'Adam Thalhammer',
- 'Georg Jähnig',
- 'Ralf Haring',
- 'Koki Takahashi',
- 'Ariset Llerena',
- 'Adam Malcontenti-Wilson',
- 'Tobias Bell',
- 'Naglis Jonaitis',
- 'Charles Chen',
- 'Hassaan Ali',
- 'Dobrosław Żybort',
-)
-
__license__ = 'Public Domain'
import codecs
import io
-import optparse
import os
import random
-import shlex
-import shutil
import sys
+from .options import (
+ parseOpts,
+)
from .utils import (
+ compat_expanduser,
compat_getpass,
compat_print,
DateRange,
DEFAULT_OUTTMPL,
decodeOption,
- get_term_width,
DownloadError,
- get_cachedir,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
FileDownloader,
)
from .extractor import gen_extractors
-from .version import __version__
from .YoutubeDL import YoutubeDL
from .postprocessor import (
AtomicParsleyPP,
FFmpegExtractAudioPP,
FFmpegEmbedSubtitlePP,
XAttrMetadataPP,
+ ExecAfterDownloadPP,
)
-def parseOpts(overrideArguments=None):
- def _readOptions(filename_bytes, default=[]):
- try:
- optionf = open(filename_bytes)
- except IOError:
- return default # silently skip if file is not present
- try:
- res = []
- for l in optionf:
- res += shlex.split(l, comments=True)
- finally:
- optionf.close()
- return res
-
- def _readUserConf():
- xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
- if xdg_config_home:
- userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
- if not os.path.isfile(userConfFile):
- userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
- else:
- userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
- if not os.path.isfile(userConfFile):
- userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
- userConf = _readOptions(userConfFile, None)
-
- if userConf is None:
- appdata_dir = os.environ.get('appdata')
- if appdata_dir:
- userConf = _readOptions(
- os.path.join(appdata_dir, 'youtube-dl', 'config'),
- default=None)
- if userConf is None:
- userConf = _readOptions(
- os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
- default=None)
-
- if userConf is None:
- userConf = _readOptions(
- os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
- default=None)
- if userConf is None:
- userConf = _readOptions(
- os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
- default=None)
-
- if userConf is None:
- userConf = []
-
- return userConf
-
- def _format_option_string(option):
- ''' ('-o', '--option') -> -o, --format METAVAR'''
-
- opts = []
-
- if option._short_opts:
- opts.append(option._short_opts[0])
- if option._long_opts:
- opts.append(option._long_opts[0])
- if len(opts) > 1:
- opts.insert(1, ', ')
-
- if option.takes_value(): opts.append(' %s' % option.metavar)
-
- return "".join(opts)
-
- def _comma_separated_values_options_callback(option, opt_str, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
- def _hide_login_info(opts):
- opts = list(opts)
- for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
- try:
- i = opts.index(private_opt)
- opts[i+1] = '<PRIVATE>'
- except ValueError:
- pass
- return opts
-
- max_width = 80
- max_help_position = 80
-
- # No need to wrap help messages if we're on a wide console
- columns = get_term_width()
- if columns: max_width = columns
-
- fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
- fmt.format_option_strings = _format_option_string
-
- kw = {
- 'version' : __version__,
- 'formatter' : fmt,
- 'usage' : '%prog [options] url [url...]',
- 'conflict_handler' : 'resolve',
- }
-
- parser = optparse.OptionParser(**kw)
-
- # option groups
- general = optparse.OptionGroup(parser, 'General Options')
- selection = optparse.OptionGroup(parser, 'Video Selection')
- authentication = optparse.OptionGroup(parser, 'Authentication Options')
- video_format = optparse.OptionGroup(parser, 'Video Format Options')
- subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
- downloader = optparse.OptionGroup(parser, 'Download Options')
- postproc = optparse.OptionGroup(parser, 'Post-processing Options')
- filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
- workarounds = optparse.OptionGroup(parser, 'Workarounds')
- verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
- general.add_option('-h', '--help',
- action='help', help='print this help text and exit')
- general.add_option('-v', '--version',
- action='version', help='print program version and exit')
- general.add_option('-U', '--update',
- action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
- general.add_option('-i', '--ignore-errors',
- action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
- general.add_option('--abort-on-error',
- action='store_false', dest='ignoreerrors',
- help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
- general.add_option('--dump-user-agent',
- action='store_true', dest='dump_user_agent',
- help='display the current browser identification', default=False)
- general.add_option('--list-extractors',
- action='store_true', dest='list_extractors',
- help='List all supported extractors and the URLs they would handle', default=False)
- general.add_option('--extractor-descriptions',
- action='store_true', dest='list_extractor_descriptions',
- help='Output descriptions of all supported extractors', default=False)
- general.add_option(
- '--proxy', dest='proxy', default=None, metavar='URL',
- help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
- general.add_option(
- '--socket-timeout', dest='socket_timeout',
- type=float, default=None, help=u'Time to wait before giving up, in seconds')
- general.add_option(
- '--default-search',
- dest='default_search', metavar='PREFIX',
- help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
- general.add_option(
- '--ignore-config',
- action='store_true',
- help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
-
- selection.add_option(
- '--playlist-start',
- dest='playliststart', metavar='NUMBER', default=1, type=int,
- help='playlist video to start at (default is %default)')
- selection.add_option(
- '--playlist-end',
- dest='playlistend', metavar='NUMBER', default=None, type=int,
- help='playlist video to end at (default is last)')
- selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
- selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
- selection.add_option('--max-downloads', metavar='NUMBER',
- dest='max_downloads', type=int, default=None,
- help='Abort after downloading NUMBER files')
- selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
- selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
- selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
- selection.add_option(
- '--datebefore', metavar='DATE', dest='datebefore', default=None,
- help='download only videos uploaded on or before this date (i.e. inclusive)')
- selection.add_option(
- '--dateafter', metavar='DATE', dest='dateafter', default=None,
- help='download only videos uploaded on or after this date (i.e. inclusive)')
- selection.add_option(
- '--min-views', metavar='COUNT', dest='min_views',
- default=None, type=int,
- help="Do not download any videos with less than COUNT views",)
- selection.add_option(
- '--max-views', metavar='COUNT', dest='max_views',
- default=None, type=int,
- help="Do not download any videos with more than COUNT views",)
- selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
- selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
- help='download only videos suitable for the given age',
- default=None, type=int)
- selection.add_option('--download-archive', metavar='FILE',
- dest='download_archive',
- help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
- selection.add_option(
- '--include-ads', dest='include_ads',
- action='store_true',
- help='Download advertisements as well (experimental)')
- selection.add_option(
- '--youtube-include-dash-manifest', action='store_true',
- dest='youtube_include_dash_manifest', default=False,
- help='Try to download the DASH manifest on YouTube videos (experimental)')
-
- authentication.add_option('-u', '--username',
- dest='username', metavar='USERNAME', help='account username')
- authentication.add_option('-p', '--password',
- dest='password', metavar='PASSWORD', help='account password')
- authentication.add_option('-n', '--netrc',
- action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
- authentication.add_option('--video-password',
- dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
-
- video_format.add_option('-f', '--format',
- action='store', dest='format', metavar='FORMAT', default=None,
- help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
- video_format.add_option('--all-formats',
- action='store_const', dest='format', help='download all available video formats', const='all')
- video_format.add_option('--prefer-free-formats',
- action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
- video_format.add_option('--max-quality',
- action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
- video_format.add_option('-F', '--list-formats',
- action='store_true', dest='listformats', help='list all available formats')
-
- subtitles.add_option('--write-sub', '--write-srt',
- action='store_true', dest='writesubtitles',
- help='write subtitle file', default=False)
- subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
- action='store_true', dest='writeautomaticsub',
- help='write automatic subtitle file (youtube only)', default=False)
- subtitles.add_option('--all-subs',
- action='store_true', dest='allsubtitles',
- help='downloads all the available subtitles of the video', default=False)
- subtitles.add_option('--list-subs',
- action='store_true', dest='listsubtitles',
- help='lists all available subtitles for the video', default=False)
- subtitles.add_option('--sub-format',
- action='store', dest='subtitlesformat', metavar='FORMAT',
- help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
- subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
- action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
- default=[], callback=_comma_separated_values_options_callback,
- help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
-
- downloader.add_option('-r', '--rate-limit',
- dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
- downloader.add_option('-R', '--retries',
- dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
- downloader.add_option('--buffer-size',
- dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
- downloader.add_option('--no-resize-buffer',
- action='store_true', dest='noresizebuffer',
- help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
- downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
-
- workarounds.add_option(
- '--encoding', dest='encoding', metavar='ENCODING',
- help='Force the specified encoding (experimental)')
- workarounds.add_option(
- '--no-check-certificate', action='store_true',
- dest='no_check_certificate', default=False,
- help='Suppress HTTPS certificate validation.')
- workarounds.add_option(
- '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
- help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
- workarounds.add_option(
- '--user-agent', metavar='UA',
- dest='user_agent', help='specify a custom user agent')
- workarounds.add_option(
- '--referer', metavar='REF',
- dest='referer', default=None,
- help='specify a custom referer, use if the video access is restricted to one domain',
- )
- workarounds.add_option(
- '--add-header', metavar='FIELD:VALUE',
- dest='headers', action='append',
- help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
- )
- workarounds.add_option(
- '--bidi-workaround', dest='bidi_workaround', action='store_true',
- help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
-
- verbosity.add_option('-q', '--quiet',
- action='store_true', dest='quiet', help='activates quiet mode', default=False)
- verbosity.add_option(
- '--no-warnings',
- dest='no_warnings', action='store_true', default=False,
- help='Ignore warnings')
- verbosity.add_option('-s', '--simulate',
- action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
- verbosity.add_option('--skip-download',
- action='store_true', dest='skip_download', help='do not download the video', default=False)
- verbosity.add_option('-g', '--get-url',
- action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
- verbosity.add_option('-e', '--get-title',
- action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
- verbosity.add_option('--get-id',
- action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
- verbosity.add_option('--get-thumbnail',
- action='store_true', dest='getthumbnail',
- help='simulate, quiet but print thumbnail URL', default=False)
- verbosity.add_option('--get-description',
- action='store_true', dest='getdescription',
- help='simulate, quiet but print video description', default=False)
- verbosity.add_option('--get-duration',
- action='store_true', dest='getduration',
- help='simulate, quiet but print video length', default=False)
- verbosity.add_option('--get-filename',
- action='store_true', dest='getfilename',
- help='simulate, quiet but print output filename', default=False)
- verbosity.add_option('--get-format',
- action='store_true', dest='getformat',
- help='simulate, quiet but print output format', default=False)
- verbosity.add_option('-j', '--dump-json',
- action='store_true', dest='dumpjson',
- help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
- verbosity.add_option('--newline',
- action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
- verbosity.add_option('--no-progress',
- action='store_true', dest='noprogress', help='do not print progress bar', default=False)
- verbosity.add_option('--console-title',
- action='store_true', dest='consoletitle',
- help='display progress in console titlebar', default=False)
- verbosity.add_option('-v', '--verbose',
- action='store_true', dest='verbose', help='print various debugging information', default=False)
- verbosity.add_option('--dump-intermediate-pages',
- action='store_true', dest='dump_intermediate_pages', default=False,
- help='print downloaded pages to debug problems (very verbose)')
- verbosity.add_option('--write-pages',
- action='store_true', dest='write_pages', default=False,
- help='Write downloaded intermediary pages to files in the current directory to debug problems')
- verbosity.add_option('--youtube-print-sig-code',
- action='store_true', dest='youtube_print_sig_code', default=False,
- help=optparse.SUPPRESS_HELP)
- verbosity.add_option('--print-traffic',
- dest='debug_printtraffic', action='store_true', default=False,
- help='Display sent and read HTTP traffic')
-
-
- filesystem.add_option('-a', '--batch-file',
- dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
- filesystem.add_option('--id',
- action='store_true', dest='useid', help='use only video ID in file name', default=False)
- filesystem.add_option('-A', '--auto-number',
- action='store_true', dest='autonumber',
- help='number downloaded files starting from 00000', default=False)
- filesystem.add_option('-o', '--output',
- dest='outtmpl', metavar='TEMPLATE',
- help=('output filename template. Use %(title)s to get the title, '
- '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
- '%(autonumber)s to get an automatically incremented number, '
- '%(ext)s for the filename extension, '
- '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
- '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
- '%(upload_date)s for the upload date (YYYYMMDD), '
- '%(extractor)s for the provider (youtube, metacafe, etc), '
- '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
- '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
- '%(height)s and %(width)s for the width and height of the video format. '
- '%(resolution)s for a textual description of the resolution of the video format. '
- 'Use - to output to stdout. Can also be used to download to a different directory, '
- 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
- filesystem.add_option('--autonumber-size',
- dest='autonumber_size', metavar='NUMBER',
- help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
- filesystem.add_option('--restrict-filenames',
- action='store_true', dest='restrictfilenames',
- help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
- filesystem.add_option('-t', '--title',
- action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
- filesystem.add_option('-l', '--literal',
- action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
- filesystem.add_option('-w', '--no-overwrites',
- action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
- filesystem.add_option('-c', '--continue',
- action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
- filesystem.add_option('--no-continue',
- action='store_false', dest='continue_dl',
- help='do not resume partially downloaded files (restart from beginning)')
- filesystem.add_option('--no-part',
- action='store_true', dest='nopart', help='do not use .part files', default=False)
- filesystem.add_option('--no-mtime',
- action='store_false', dest='updatetime',
- help='do not use the Last-modified header to set the file modification time', default=True)
- filesystem.add_option('--write-description',
- action='store_true', dest='writedescription',
- help='write video description to a .description file', default=False)
- filesystem.add_option('--write-info-json',
- action='store_true', dest='writeinfojson',
- help='write video metadata to a .info.json file', default=False)
- filesystem.add_option('--write-annotations',
- action='store_true', dest='writeannotations',
- help='write video annotations to a .annotation file', default=False)
- filesystem.add_option('--write-thumbnail',
- action='store_true', dest='writethumbnail',
- help='write thumbnail image to disk', default=False)
- filesystem.add_option('--load-info',
- dest='load_info_filename', metavar='FILE',
- help='json file containing the video information (created with the "--write-json" option)')
- filesystem.add_option('--cookies',
- dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
- filesystem.add_option(
- '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
- help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
- filesystem.add_option(
- '--no-cache-dir', action='store_const', const=None, dest='cachedir',
- help='Disable filesystem caching')
- filesystem.add_option(
- '--rm-cache-dir', action='store_true', dest='rm_cachedir',
- help='Delete all filesystem cache files')
-
-
- postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
- help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
- postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
- help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
- postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
- help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
- postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
- help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
- postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
- help='keeps the video file on disk after the post-processing; the video is erased by default')
- postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
- help='do not overwrite post-processed files; the post-processed files are overwritten by default')
- postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
- help='embed subtitles in the video (only for mp4 videos)')
- postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
- help='embed thumbnail in the audio as cover art')
- postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
- help='write metadata to the video file')
- postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
- help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
- postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
- help='Prefer avconv over ffmpeg for running the postprocessors (default)')
- postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
- help='Prefer ffmpeg over avconv for running the postprocessors')
-
-
- parser.add_option_group(general)
- parser.add_option_group(selection)
- parser.add_option_group(downloader)
- parser.add_option_group(filesystem)
- parser.add_option_group(verbosity)
- parser.add_option_group(workarounds)
- parser.add_option_group(video_format)
- parser.add_option_group(subtitles)
- parser.add_option_group(authentication)
- parser.add_option_group(postproc)
-
- if overrideArguments is not None:
- opts, args = parser.parse_args(overrideArguments)
- if opts.verbose:
- write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
- else:
- commandLineConf = sys.argv[1:]
- if '--ignore-config' in commandLineConf:
- systemConf = []
- userConf = []
- else:
- systemConf = _readOptions('/etc/youtube-dl.conf')
- if '--ignore-config' in systemConf:
- userConf = []
- else:
- userConf = _readUserConf()
- argv = systemConf + userConf + commandLineConf
-
- opts, args = parser.parse_args(argv)
- if opts.verbose:
- write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
- write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
- write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
-
- return parser, opts, args
-
-
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
- if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
- parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
u' template'.format(outtmpl))
- any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
- download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
+ any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
+ download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
ydl_opts = {
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
+ 'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'quiet': (opts.quiet or any_printing),
'no_warnings': opts.no_warnings,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson,
- 'simulate': opts.simulate,
- 'skip_download': (opts.skip_download or opts.simulate or any_printing),
+ 'dump_single_json': opts.dump_single_json,
+ 'simulate': opts.simulate or any_printing,
+ 'skip_download': opts.skip_download,
'format': opts.format,
'format_limit': opts.format_limit,
'listformats': opts.listformats,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
+ 'exec_cmd': opts.exec_cmd,
+ 'extract_flat': opts.extract_flat,
}
with YoutubeDL(ydl_opts) as ydl:
- ydl.print_debug_header()
- ydl.add_default_info_extractors()
-
# PostProcessors
# Add the metadata pp first, the other pps will copy it
if opts.addmetadata:
ydl.add_post_processor(FFmpegAudioFixPP())
ydl.add_post_processor(AtomicParsleyPP())
+
+    # Please keep ExecAfterDownload towards the bottom, as it allows the user to modify (or even remove) the final file.
+    # If it ran earlier, the file might no longer be there by the time later postprocessors run, which would cause problems.
+ if opts.exec_cmd:
+ ydl.add_post_processor(ExecAfterDownloadPP(
+ verboseOutput=opts.verbose, exec_cmd=opts.exec_cmd))
+
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose)
# Remove cache dir
if opts.rm_cachedir:
- if opts.cachedir is None:
- ydl.to_screen(u'No cache dir specified (Did you combine --no-cache-dir and --rm-cache-dir?)')
- else:
- if ('.cache' not in opts.cachedir) or ('youtube-dl' not in opts.cachedir):
- ydl.to_screen(u'Not removing directory %s - this does not look like a cache dir')
- retcode = 141
- else:
- ydl.to_screen(
- u'Removing cache dir %s .' % opts.cachedir,
- skip_eol=True)
- if os.path.exists(opts.cachedir):
- ydl.to_screen(u'.', skip_eol=True)
- shutil.rmtree(opts.cachedir)
- ydl.to_screen(u'.')
+ ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
--- /dev/null
+from __future__ import unicode_literals
+
+import errno
+import io
+import json
+import os
+import re
+import shutil
+import traceback
+
+from .utils import (
+ compat_expanduser,
+ write_json_file,
+)
+
+
+class Cache(object):
+ def __init__(self, ydl):
+ self._ydl = ydl
+
+ def _get_root_dir(self):
+ res = self._ydl.params.get('cachedir')
+ if res is None:
+ cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
+ res = os.path.join(cache_root, 'youtube-dl')
+ return compat_expanduser(res)
+
+ def _get_cache_fn(self, section, key, dtype):
+ assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
+ 'invalid section %r' % section
+ assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
+ return os.path.join(
+ self._get_root_dir(), section, '%s.%s' % (key, dtype))
+
+ @property
+ def enabled(self):
+ return self._ydl.params.get('cachedir') is not False
+
+ def store(self, section, key, data, dtype='json'):
+ assert dtype in ('json',)
+
+ if not self.enabled:
+ return
+
+ fn = self._get_cache_fn(section, key, dtype)
+ try:
+ try:
+ os.makedirs(os.path.dirname(fn))
+ except OSError as ose:
+ if ose.errno != errno.EEXIST:
+ raise
+ write_json_file(data, fn)
+ except Exception:
+ tb = traceback.format_exc()
+ self._ydl.report_warning(
+ 'Writing cache to %r failed: %s' % (fn, tb))
+
+ def load(self, section, key, dtype='json', default=None):
+ assert dtype in ('json',)
+
+ if not self.enabled:
+ return default
+
+ cache_fn = self._get_cache_fn(section, key, dtype)
+ try:
+ try:
+ with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
+ return json.load(cachef)
+ except ValueError:
+ try:
+ file_size = os.path.getsize(cache_fn)
+ except (OSError, IOError) as oe:
+ file_size = str(oe)
+ self._ydl.report_warning(
+ 'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
+ except IOError:
+ pass # No cache available
+
+ return default
+
+ def remove(self):
+ if not self.enabled:
+ self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
+ return
+
+ cachedir = self._get_root_dir()
+ if not any((term in cachedir) for term in ('cache', 'tmp')):
+ raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+
+ self._ydl.to_screen(
+ 'Removing cache dir %s .' % cachedir, skip_eol=True)
+ if os.path.exists(cachedir):
+ self._ydl.to_screen('.', skip_eol=True)
+ shutil.rmtree(cachedir)
+ self._ydl.to_screen('.')
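For reference, a minimal usage sketch of the new Cache API; the section, key, value and directory below are made up for illustration (in the code base the main consumer is the YouTube player/signature cache):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({'cachedir': '/tmp/ydl-cache'})           # any writable directory
    ydl.cache.store('example-section', 'example-key', {'answer': 42})
    print(ydl.cache.load('example-section', 'example-key'))   # -> {'answer': 42}
    # with {'cachedir': False} store() is a no-op and load() returns its default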
from .common import FileDownloader
from .hls import HlsFD
+from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
if url.startswith('rtmp'):
return RtmpFD
+ if protocol == 'm3u8_native':
+ return NativeHlsFD
if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
return HlsFD
if url.startswith('mms') or url.startswith('rtsp'):
Subclasses of this one must re-define the real_download method.
"""
+ _TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
- raise NotImplementedError(u'This method must be implemented by sublcasses')
+ raise NotImplementedError(u'This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
format_bytes,
encodeFilename,
sanitize_open,
+ xpath_text,
)
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
- bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
+ bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
+ if bootstrap_node.text is None:
+ bootstrap_url = compat_urlparse.urljoin(
+ base_url, bootstrap_node.attrib['url'])
+ bootstrap = self.ydl.urlopen(bootstrap_url).read()
+ else:
+ bootstrap = base64.b64decode(bootstrap_node.text)
metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
boot_info = read_bootstrap_info(bootstrap)
+
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
+        # For some Akamai manifests we'll need to add a query string to the fragment URL
+ akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
+ if akamai_pv:
+ url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
+from __future__ import unicode_literals
+
import os
+import re
import subprocess
from .common import FileDownloader
from ..utils import (
+ compat_urlparse,
+ compat_urllib_request,
+ check_executable,
encodeFilename,
)
encodeFilename(tmpfilename, for_subprocess=True)]
for program in ['avconv', 'ffmpeg']:
- try:
- subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+ if check_executable(program, ['-version']):
break
- except (OSError, IOError):
- pass
else:
self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+ return False
cmd = [program] + args
retval = subprocess.call(cmd)
return True
else:
self.to_stderr(u"\n")
- self.report_error(u'ffmpeg exited with code %d' % retval)
+ self.report_error(u'%s exited with code %d' % (program, retval))
return False
+
+
+class NativeHlsFD(FileDownloader):
+ """ A more limited implementation that does not require ffmpeg """
+
+ def real_download(self, filename, info_dict):
+ url = info_dict['url']
+ self.report_destination(filename)
+ tmpfilename = self.temp_name(filename)
+
+ self.to_screen(
+ '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
+ data = self.ydl.urlopen(url).read()
+ s = data.decode('utf-8', 'ignore')
+ segment_urls = []
+ for line in s.splitlines():
+ line = line.strip()
+ if line and not line.startswith('#'):
+ segment_url = (
+ line
+ if re.match(r'^https?://', line)
+ else compat_urlparse.urljoin(url, line))
+ segment_urls.append(segment_url)
+
+ is_test = self.params.get('test', False)
+ remaining_bytes = self._TEST_FILE_SIZE if is_test else None
+ byte_counter = 0
+ with open(tmpfilename, 'wb') as outf:
+ for i, segurl in enumerate(segment_urls):
+ self.to_screen(
+ '[hlsnative] %s: Downloading segment %d / %d' %
+ (info_dict['id'], i + 1, len(segment_urls)))
+ seg_req = compat_urllib_request.Request(segurl)
+ if remaining_bytes is not None:
+ seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+
+ segment = self.ydl.urlopen(seg_req).read()
+ if remaining_bytes is not None:
+ segment = segment[:remaining_bytes]
+ remaining_bytes -= len(segment)
+ outf.write(segment)
+ byte_counter += len(segment)
+ if remaining_bytes is not None and remaining_bytes <= 0:
+ break
+
+ self._hook_progress({
+ 'downloaded_bytes': byte_counter,
+ 'total_bytes': byte_counter,
+ 'filename': filename,
+ 'status': 'finished',
+ })
+ self.try_rename(tmpfilename, filename)
+ return True
+
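As an aside, a small self-contained sketch of the manifest parsing NativeHlsFD performs; the manifest contents and URLs are invented. Tag/comment lines starting with '#' are skipped and every other non-empty line is taken as a segment URL, joined against the manifest URL when relative:

    import re
    try:
        from urllib.parse import urljoin   # Python 3
    except ImportError:
        from urlparse import urljoin       # Python 2

    manifest_url = 'http://example.com/stream/index.m3u8'
    manifest = '\n'.join([
        '#EXTM3U',
        '#EXTINF:9.0,',
        'seg-000.ts',                         # relative -> joined with manifest_url
        '#EXTINF:9.0,',
        'http://cdn.example.com/seg-001.ts',  # absolute -> used as-is
        '#EXT-X-ENDLIST',
    ])
    segment_urls = [
        line if re.match(r'^https?://', line) else urljoin(manifest_url, line)
        for line in (raw.strip() for raw in manifest.splitlines())
        if line and not line.startswith('#')
    ]
    print(segment_urls)
    # ['http://example.com/stream/seg-000.ts', 'http://cdn.example.com/seg-001.ts']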
class HttpFD(FileDownloader):
- _TEST_FILE_SIZE = 10241
-
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
headers['Youtubedl-user-agent'] = info_dict['user_agent']
if 'http_referer' in info_dict:
headers['Referer'] = info_dict['http_referer']
- basic_request = compat_urllib_request.Request(url, None, headers)
- request = compat_urllib_request.Request(url, None, headers)
+ add_headers = info_dict.get('http_headers')
+ if add_headers:
+ headers.update(add_headers)
+ data = info_dict.get('http_post_data')
+ http_method = info_dict.get('http_method')
+ basic_request = compat_urllib_request.Request(url, data, headers)
+ request = compat_urllib_request.Request(url, data, headers)
+ if http_method is not None:
+ basic_request.get_method = lambda: http_method
+ request.get_method = lambda: http_method
is_test = self.params.get('test', False)
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False
- stream.close()
+ if tmpfilename != u'-':
+ stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))
from .common import FileDownloader
from ..utils import (
+ check_executable,
+ compat_str,
encodeFilename,
format_bytes,
- compat_str,
)
test = self.params.get('test', False)
# Check for rtmpdump first
- try:
- subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
- except (OSError, IOError):
+ if not check_executable('rtmpdump', ['-h']):
self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
return False
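Note that check_executable (imported from ..utils above) returns a truthy value when the program can be launched with the given arguments and False otherwise, which is what makes these one-line availability checks possible; a tiny hedged sketch:

    from youtube_dl.utils import check_executable

    if not check_executable('rtmpdump', ['-h']):
        print('rtmpdump is not installed or not on PATH')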
from .adultswim import AdultSwimIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
+from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
-from .ard import ARDIE
+from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
+from .audiomack import AudiomackIE
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
+from .beeg import BeegIE
+from .behindkink import BehindKinkIE
+from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
+from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
-from .crunchyroll import CrunchyrollIE
+from .crunchyroll import (
+ CrunchyrollIE,
+ CrunchyrollShowPlaylistIE
+)
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionUserIE,
)
from .daum import DaumIE
+from .dbtv import DBTVIE
+from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
+from .drtuber import DrTuberIE
from .drtv import DRTVIE
+from .dump import DumpIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
+from .einthusan import EinthusanIE
from .eitb import EitbIE
+from .ellentv import (
+ EllenTVIE,
+ EllenTVClipsIE,
+)
from .elpais import ElPaisIE
-from .empflix import EmpflixIE
+from .empflix import EMPFlixIE
from .engadget import EngadgetIE
+from .eporner import EpornerIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
+from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .freespeech import FreespeechIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
-from .gameone import GameOneIE
+from .gameone import (
+ GameOneIE,
+ GameOnePlaylistIE,
+)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
+from .glide import GlideIE
+from .globo import GloboIE
from .godtube import GodTubeIE
+from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
+from .grooveshark import GroovesharkIE
from .hark import HarkIE
+from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
+from .hornbunny import HornBunnyIE
+from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
+from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
+from .jove import JoveIE
from .jukebox import JukeboxIE
-from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
+from .laola1tv import Laola1TvIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamOriginalIE,
LivestreamShortenerIE,
)
+from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
+from .mgoon import MgoonIE
+from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
+from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
+from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
+from .mojvideo import MojvideoIE
+from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
+from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVServicesEmbeddedIE,
MTVIggyIE,
)
+from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
+from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
+from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
-from .niconico import NiconicoIE
+from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
+from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
-from .npo import NPOIE
+from .npo import (
+ NPOIE,
+ TegenlichtVproIE,
+)
from .nrk import (
NRKIE,
NRKTVIE,
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
-from .oe1 import OE1IE
+from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
-from .orf import ORFIE
+from .orf import (
+ ORFTVthekIE,
+ ORFOE1IE,
+ ORFFM4IE,
+)
from .parliamentliveuk import ParliamentLiveUKIE
+from .patreon import PatreonIE
from .pbs import PBSIE
+from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
+from .planetaplay import PlanetaPlayIE
+from .played import PlayedIE
+from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
+from .pornoxo import PornoXOIE
+from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
+from .quickvid import QuickVidIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
+from .rtlnl import RtlXlIE
from .rtlnow import RTLnowIE
from .rts import RTSIE
-from .rtve import RTVEALaCartaIE
+from .rtve import RTVEALaCartaIE, RTVELiveIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
from .rutv import RUTVIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
+from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
+from .sexykarma import SexyKarmaIE
from .shared import SharedIE
+from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
)
from .space import SpaceIE
from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE
+from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
+from .sport5 import Sport5IE
+from .sportbox import SportBoxIE
+from .sportdeutschland import SportDeutschlandIE
+from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
+from .sunporno import SunPornoIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
+from .tapely import TapelyIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
+from .telecinco import TelecincoIE
+from .telemb import TeleMBIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
+from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
+from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
+from .tnaflix import TNAFlixIE
+from .thvideo import (
+ THVideoIE,
+ THVideoPlaylistIE
+)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
+from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .tvplay import TVPlayIE
+from .twitch import TwitchIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
+from .vgtv import VGTVIE
from .vh1 import VH1IE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
+from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
+from .vidzi import VidziIE
from .vimeo import (
VimeoIE,
- VimeoChannelIE,
- VimeoUserIE,
VimeoAlbumIE,
+ VimeoChannelIE,
VimeoGroupsIE,
+ VimeoLikesIE,
VimeoReviewIE,
+ VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .viki import VikiIE
from .vk import VKIE
from .vodlocker import VodlockerIE
+from .vporn import VpornIE
+from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
+from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
+from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .xbef import XBefIE
+from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
- YahooNewsIE,
YahooSearchIE,
)
+from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
+from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
-
from .zdf import ZDFIE
-
_ALL_CLASSES = [
klass
for name, klass in globals().items()
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
urls_info_json = self._search_regex(
class AcademicEarthCourseIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
IE_NAME = 'AcademicEarth:Course'
+ _TEST = {
+ 'url': 'http://academicearth.org/playlists/laws-of-nature/',
+ 'info_dict': {
+ 'id': 'laws-of-nature',
+ 'title': 'Laws of Nature',
+ 'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
+ },
+ 'playlist_count': 4,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_path = mobj.group('path')
webpage = self._download_webpage(url, video_path)
- episode_id = self._html_search_regex(r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>', webpage, 'episode_id')
+ episode_id = self._html_search_regex(
+ r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>',
+ webpage, 'episode_id')
title = self._og_search_title(webpage)
index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
duration = segment_el.attrib.get('duration')
segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
- idoc = self._download_xml(segment_url, segment_title, 'Downloading segment information', 'Unable to download segment information')
+ idoc = self._download_xml(
+ segment_url, segment_title,
+ 'Downloading segment information', 'Unable to download segment information')
formats = []
file_els = idoc.findall('.//files/file')
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ int_or_none,
+)
+
+
+class AnySexIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://anysex.com/156592/',
+ 'md5': '023e9fbb7f7987f5529a394c34ad3d3d',
+ 'info_dict': {
+ 'id': '156592',
+ 'ext': 'mp4',
+ 'title': 'Busty and sexy blondie in her bikini strips for you',
+ 'description': 'md5:de9e418178e2931c10b62966474e1383',
+ 'categories': ['Erotic'],
+ 'duration': 270,
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')
+
+ title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+ description = self._html_search_regex(
+ r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
+ thumbnail = self._html_search_regex(
+ r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
+
+ categories = re.findall(
+ r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
+
+ duration = parse_duration(self._search_regex(
+ r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
+ view_count = int_or_none(self._html_search_regex(
+ r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'age_limit': 18,
+ }
(?:$|\?)
'''
- _TEST = {
+ _TESTS = [{
'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
'md5': '18ef68f48740e86ae94b98da815eec42',
'info_dict': {
'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
},
'add_ie': ['FiveMin'],
- }
+ }, {
+ 'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
+ 'info_dict': {
+ 'id': '152147',
+ 'title': 'Brace Yourself - Today\'s Weirdest News',
+ },
+ 'playlist_mincount': 10,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
#coding: utf-8
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
_VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
_TEST = {
- u'url': u'http://www.aparat.com/v/wP8On',
- u'file': u'wP8On.mp4',
- u'md5': u'6714e0af7e0d875c5a39c4dc4ab46ad1',
- u'info_dict': {
- u"title": u"تیم گلکسی 11 - زومیت",
+ 'url': 'http://www.aparat.com/v/wP8On',
+ 'md5': '6714e0af7e0d875c5a39c4dc4ab46ad1',
+ 'info_dict': {
+ 'id': 'wP8On',
+ 'ext': 'mp4',
+ 'title': 'تیم گلکسی 11 - زومیت',
},
- #u'skip': u'Extremely unreliable',
+ # 'skip': 'Extremely unreliable',
}
def _real_extract(self, url):
# Note: There is an easier-to-parse configuration at
# http://www.aparat.com/video/video/config/videohash/%video_id
# but the URL in there does not work
- embed_url = (u'http://www.aparat.com/video/video/embed/videohash/' +
- video_id + u'/vt/frame')
+ embed_url = ('http://www.aparat.com/video/video/embed/videohash/' +
+ video_id + '/vt/frame')
webpage = self._download_webpage(embed_url, video_id)
video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
+ int_or_none,
)
formats.append({
'url': format_url,
'format': format['type'],
- 'width': format['width'],
- 'height': int(format['height']),
+ 'width': int_or_none(format['width']),
+ 'height': int_or_none(format['height']),
})
self._sort_formats(formats)
import re
from .common import InfoExtractor
+from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
- compat_urllib_parse_urlparse,
- compat_urllib_parse,
+ int_or_none,
+ parse_duration,
+ unified_strdate,
+ xpath_text,
+ parse_xml,
)
-class ARDIE(InfoExtractor):
+class ARDMediathekIE(InfoExtractor):
+ IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
else:
video_id = m.group('video_id')
- urlp = compat_urllib_parse_urlparse(url)
- url = urlp._replace(path=compat_urllib_parse.quote(urlp.path.encode('utf-8'))).geturl()
-
webpage = self._download_webpage(url, video_id)
+ if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
+ raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
+
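+ # URLs carrying an rss query parameter are RSS feeds; hand those off
+ # to the generic extractor's RSS handling.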
+ if re.search(r'[\?&]rss($|[=&])', url):
+ doc = parse_xml(webpage)
+ if doc.tag == 'rss':
+ return GenericIE()._extract_rss(url, video_id, doc)
+
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
'formats': formats,
'thumbnail': thumbnail,
}
+
+
+class ARDIE(InfoExtractor):
+ _VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
+ _TEST = {
+ 'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
+ 'md5': 'd216c3a86493f9322545e045ddc3eb35',
+ 'info_dict': {
+ 'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
+ 'id': '100',
+ 'ext': 'mp4',
+ 'duration': 2600,
+ 'title': 'Die Story im Ersten: Mission unter falscher Flagge',
+ 'upload_date': '20140804',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('display_id')
+
+ player_url = mobj.group('mainurl') + '~playerXml.xml'
+ doc = self._download_xml(player_url, display_id)
+ video_node = doc.find('./video')
+ upload_date = unified_strdate(xpath_text(
+ video_node, './broadcastDate'))
+ thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
+
+ formats = []
+ for a in video_node.findall('.//asset'):
+ f = {
+ 'format_id': a.attrib['type'],
+ 'width': int_or_none(a.find('./frameWidth').text),
+ 'height': int_or_none(a.find('./frameHeight').text),
+ 'vbr': int_or_none(a.find('./bitrateVideo').text),
+ 'abr': int_or_none(a.find('./bitrateAudio').text),
+ 'vcodec': a.find('./codecVideo').text,
+ 'tbr': int_or_none(a.find('./totalBitrate').text),
+ }
+ if a.find('./serverPrefix').text:
+ f['url'] = a.find('./serverPrefix').text
+ f['playpath'] = a.find('./fileName').text
+ else:
+ f['url'] = a.find('./fileName').text
+ formats.append(f)
+ self._sort_formats(formats)
+
+ return {
+ 'id': mobj.group('id'),
+ 'formats': formats,
+ 'display_id': display_id,
+ 'title': video_node.find('./title').text,
+ 'duration': parse_duration(video_node.find('./duration').text),
+ 'upload_date': upload_date,
+ 'thumbnail': thumbnail,
+ }
+
unified_strdate,
determine_ext,
get_element_by_id,
- compat_str,
get_element_by_attribute,
+ int_or_none,
)
# There are different sources of video in arte.tv, the extraction process
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
- r'arte_vp_url="(.*?)"', webpage, 'json vp url')
+ [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
+ webpage, 'json vp url')
return self._extract_from_json_url(json_url, video_id, lang)
def _extract_from_json_url(self, json_url, video_id, lang):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
+ upload_date_str = player_info.get('shootingDate')
+ if not upload_date_str:
+ upload_date_str = player_info.get('VDA', '').split(' ')[0]
+
+ title = player_info['VTI'].strip()
+ subtitle = player_info.get('VSU', '').strip()
+ if subtitle:
+ title += ' - %s' % subtitle
+
info_dict = {
'id': player_info['VID'],
- 'title': player_info['VTI'],
+ 'title': title,
'description': player_info.get('VDE'),
- 'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
+ 'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
- all_formats = player_info['VSR'].values()
+ all_formats = []
+ for format_id, format_dict in player_info['VSR'].items():
+ fmt = dict(format_dict)
+ fmt['format_id'] = format_id
+ all_formats.append(fmt)
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
def _match_lang(f):
regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
return any(re.match(r, f['versionCode']) for r in regexes)
# Some formats may not be in the same language as the url
+ # TODO: Might want to keep videos that do not match the requested
+ # language and process those formats with lower precedence instead
formats = filter(_match_lang, all_formats)
- formats = list(formats) # in python3 filter returns an iterator
+ formats = list(formats) # in python3 filter returns an iterator
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German
- if all(f['versionCode'] == 'VO' or f['versionCode'] == 'VA' for f in all_formats):
- formats = all_formats
- else:
- raise ExtractorError(u'The formats list is empty')
+ # Sometimes there are neither videos of the requested language
+ # nor original version videos available.
+ # In such cases we just take all_formats as is.
+ formats = all_formats
+ if not formats:
+ raise ExtractorError('The formats list is empty')
if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
def sort_key(f):
)
formats = sorted(formats, key=sort_key)
def _format(format_info):
- quality = ''
- height = format_info.get('height')
- if height is not None:
- quality = compat_str(height)
- bitrate = format_info.get('bitrate')
- if bitrate is not None:
- quality += '-%d' % bitrate
- if format_info.get('versionCode') is not None:
- format_id = '%s-%s' % (quality, format_info['versionCode'])
- else:
- format_id = quality
info = {
- 'format_id': format_id,
- 'format_note': format_info.get('versionLibelle'),
- 'width': format_info.get('width'),
- 'height': height,
+ 'format_id': format_info['format_id'],
+ 'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')),
+ 'width': int_or_none(format_info.get('width')),
+ 'height': int_or_none(format_info.get('height')),
+ 'tbr': int_or_none(format_info.get('bitrate')),
}
if format_info['mediaType'] == 'rtmp':
info['url'] = format_info['streamer']
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:creative'
- _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
+ _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
'info_dict': {
- 'id': '050489-002',
+ 'id': '72176',
'ext': 'mp4',
- 'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design',
+ 'title': 'Folge 2 - Corporate Design',
+ 'upload_date': '20131004',
},
- }
+ }, {
+ 'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion',
+ 'info_dict': {
+ 'id': '160676',
+ 'ext': 'mp4',
+ 'title': 'Monty Python live (mostly)',
+ 'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
+ 'upload_date': '20140805',
+ }
+ }]
class ArteTVFutureIE(ArteTVPlus7IE):
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .soundcloud import SoundcloudIE
+from ..utils import ExtractorError
+
+import time
+
+
+class AudiomackIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
+ IE_NAME = 'audiomack'
+ _TESTS = [
+ # hosted on audiomack
+ {
+ 'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
+ 'info_dict':
+ {
+ 'id': 'roosh-williams/extraordinary',
+ 'ext': 'mp3',
+ 'title': 'Roosh Williams - Extraordinary'
+ }
+ },
+ # hosted on soundcloud via audiomack
+ {
+ 'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
+ 'file': '172419696.mp3',
+ 'info_dict':
+ {
+ 'ext': 'mp3',
+ 'title': 'Young Thug ft Lil Wayne - Take Kare',
+ "upload_date": "20141016",
+ "description": "New track produced by London On Da Track called “Take Kare\"\n\nhttp://instagram.com/theyoungthugworld\nhttps://www.facebook.com/ThuggerThuggerCashMoney\n",
+ "uploader": "Young Thug World"
+ }
+ }
+ ]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
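+ # the trailing ?_=<timestamp> query parameter presumably serves as a
+ # cache buster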
+ api_response = self._download_json(
+ 'http://www.audiomack.com/api/music/url/song/%s?_=%d' % (
+ video_id, time.time()),
+ video_id)
+
+ if 'url' not in api_response:
+ raise ExtractorError('Unable to deduce api url of song')
+ realurl = api_response['url']
+
+ # Audiomack wraps a lot of soundcloud tracks in their branded wrapper
+ # - if so, pass the work off to the soundcloud extractor
+ if SoundcloudIE.suitable(realurl):
+ return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
+
+ webpage = self._download_webpage(url, video_id)
+ artist = self._html_search_regex(
+ r'<span class="artist">(.*?)</span>', webpage, 'artist')
+ songtitle = self._html_search_regex(
+ r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
+ webpage, 'title')
+ title = artist + ' - ' + songtitle
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': realurl,
+ }
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
_VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
# The maximum number we can get with each request
_STEP = 50
+ _TEST = {
+ 'url': 'http://bambuser.com/channel/pixelversity',
+ 'info_dict': {
+ 'title': 'pixelversity',
+ },
+ 'playlist_mincount': 60,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
req = compat_urllib_request.Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
- info_json = self._download_webpage(req, user,
- 'Downloading page %d' % i)
- results = json.loads(info_json)['result']
- if len(results) == 0:
+ data = self._download_json(
+ req, user, 'Downloading page %d' % i)
+ results = data['result']
+ if not results:
break
last_id = results[-1]['vid']
urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
- 'file': '1812978515.mp3',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
- "title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
- "duration": 9.8485,
+ 'id': '1812978515',
+ 'ext': 'mp3',
+ 'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
+ 'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
+ }, {
+ 'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
+ 'md5': '2b68e5851514c20efdff2afc5603b8b4',
+ 'info_dict': {
+ 'id': '2650410135',
+ 'ext': 'mp3',
+ 'title': 'Lanius (Battle)',
+ 'uploader': 'Ben Prunty Music',
+ },
}]
def _real_extract(self, url):
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
- video_id = re.search(
- r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
- webpage, re.MULTILINE | re.DOTALL).group('id')
+ video_id = self._search_regex(
+ r'var TralbumData = {.*?id: (?P<id>\d+),?$',
+ webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
# We get the dictionary of the track from some javascript code
IE_NAME = 'Bandcamp:album'
_VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))'
- _TEST = {
+ _TESTS = [{
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'playlistend': 2
},
'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
- }
+ }, {
+ 'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
+ 'info_dict': {
+ 'title': 'Hierophany of the Open Grave',
+ },
+ 'playlist_mincount': 9,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class BeegIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://beeg.com/5416503',
+ 'md5': '634526ae978711f6b748fe0dd6c11f57',
+ 'info_dict': {
+ 'id': '5416503',
+ 'ext': 'mp4',
+ 'title': 'Sultry Striptease',
+ 'description': 'md5:6db3c6177972822aaba18652ff59c773',
+ 'categories': list, # NSFW
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ quality_arr = self._search_regex(
+ r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
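+ # Assumed shape of the matched snippet, e.g.:
+ #     '480p': 'http://.../480p.mp4', '720p': 'http://.../720p.mp4'
+ # (the key's trailing letter is stripped below to derive the height)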
+
+ formats = [{
+ 'url': fmt[1],
+ 'format_id': fmt[0],
+ 'height': int(fmt[0][:-1]),
+ } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
+
+ self._sort_formats(formats)
+
+ title = self._html_search_regex(
+ r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
+
+ description = self._html_search_regex(
+ r'<meta name="description" content="([^"]*)"',
+ webpage, 'description', fatal=False)
+ thumbnail = self._html_search_regex(
+ r'\'previewer.url\'\s*:\s*"([^"]*)"',
+ webpage, 'thumbnail', fatal=False)
+
+ categories_str = self._html_search_regex(
+ r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
+ categories = (
+ None if categories_str is None
+ else categories_str.split(','))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'formats': formats,
+ 'age_limit': 18,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import url_basename
+
+
+class BehindKinkIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
+ _TEST = {
+ 'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/',
+ 'md5': '41ad01222b8442089a55528fec43ec01',
+ 'info_dict': {
+ 'id': '36370',
+ 'ext': 'mp4',
+ 'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!',
+ 'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...',
+ 'upload_date': '20140814',
+ 'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('id')
+ year = mobj.group('year')
+ month = mobj.group('month')
+ day = mobj.group('day')
+ upload_date = year + month + day
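+ # e.g. /2014/08/14/... yields upload_date '20140814'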
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_url = self._search_regex(
+ r"'file':\s*'([^']+)'",
+ webpage, 'URL base')
+
+ video_id = url_basename(video_url)
+ video_id = video_id.split('_')[0]
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': self._og_search_title(webpage),
+ 'display_id': display_id,
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'description': self._og_search_description(webpage),
+ 'upload_date': upload_date,
+ 'age_limit': 18,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class BildIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
+ IE_DESC = 'Bild.de'
+ _TEST = {
+ 'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
+ 'md5': 'dd495cbd99f2413502a1713a1156ac8a',
+ 'info_dict': {
+ 'id': '38184146',
+ 'ext': 'mp4',
+ 'title': 'BILD hat sie getestet',
+ 'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
+ 'duration': 196,
+ 'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
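+ # e.g. ...ipads-38184146.bild.html -> ...ipads-38184146,view=xml.bild.xml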
+ doc = self._download_xml(xml_url, video_id)
+
+ duration = int_or_none(doc.attrib.get('duration'), scale=1000)
+
+ return {
+ 'id': video_id,
+ 'title': doc.attrib['ueberschrift'],
+ 'description': doc.attrib.get('text'),
+ 'url': doc.attrib['src'],
+ 'thumbnail': doc.attrib.get('img'),
+ 'duration': duration,
+ }
class BlipTVIE(SubtitlesInfoExtractor):
- _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+]+)))'
+ _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'
_TESTS = [
{
'uploader_id': '792887',
'duration': 279,
}
+ },
+ {
+ # https://bugzilla.redhat.com/show_bug.cgi?id=967465
+ 'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI',
+ 'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6',
+ 'info_dict': {
+ 'id': '6573122',
+ 'ext': 'mov',
+ 'upload_date': '20130520',
+ 'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.',
+ 'title': 'Red vs. Blue Season 11 Trailer',
+ 'timestamp': 1369029609,
+ 'uploader': 'redvsblue',
+ 'uploader_id': '792887',
+ }
}
]
class BlipTVUserIE(InfoExtractor):
- _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+ _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
_PAGE_SIZE = 12
IE_NAME = 'blip.tv:user'
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
ExtractorError,
'title': 'Wenn das Traditions-Theater wackelt',
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
'duration': 34,
- }
- },
- {
- 'url': 'http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html',
- 'md5': 'ab451b09d861dbed7d7cc9ab0be19ebe',
- 'info_dict': {
- 'id': '2c060e69-3a27-4e13-b0f0-668fac17d812',
- 'ext': 'mp4',
- 'title': 'Über den Pass',
- 'description': 'Die Eroberung der Alpen: Über den Pass',
- 'duration': 2588,
+ 'uploader': 'BR',
+ 'upload_date': '20140802',
}
},
{
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
+ display_id = self._match_id(url)
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
import json
from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+)
class BreakIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
- _TEST = {
+ _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
+ _TESTS = [{
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
- 'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
+ 'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
'info_dict': {
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
}
- }
+ }, {
+ 'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1).split("-")[-1]
- embed_url = 'http://www.break.com/embed/%s' % video_id
- webpage = self._download_webpage(embed_url, video_id)
- info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
- webpage, 'info json', flags=re.DOTALL)
- info = json.loads(info_json)
- video_url = info['videoUri']
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(
+ 'http://www.break.com/embed/%s' % video_id, video_id)
+ info = json.loads(self._search_regex(
+ r'var embedVars = ({.*})\s*?</script>',
+ webpage, 'info json', flags=re.DOTALL))
+
youtube_id = info.get('youtubeId')
if youtube_id:
return self.url_result(youtube_id, 'Youtube')
- final_url = video_url + '?' + info['AuthToken']
+ formats = [{
+ 'url': media['uri'] + '?' + info['AuthToken'],
+ 'tbr': media['bitRate'],
+ 'width': media['width'],
+ 'height': media['height'],
+ } for media in info['media']]
+
+ if not formats:
+ formats.append({
+ 'url': info['videoUri']
+ })
+
+ self._sort_formats(formats)
+
+ duration = int_or_none(info.get('videoLengthInSeconds'))
+ age_limit = parse_age_limit(info.get('audienceRating'))
+
return {
'id': video_id,
- 'url': final_url,
'title': info['contentName'],
'thumbnail': info['thumbUri'],
+ 'duration': duration,
+ 'age_limit': age_limit,
+ 'formats': formats,
}
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
},
},
+ {
+ # playlist test
+ # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
+ 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
+ 'info_dict': {
+ 'title': 'Sealife',
+ },
+ 'playlist_mincount': 7,
+ },
]
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
- url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
+ url_m = re.search(
+ r'<meta\s+property="og:video"\s+content="(https?://(?:secure|c)\.brightcove\.com/[^"]+)"',
+ webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
- if 'playerKey' in url:
+ if 'playerKey' in url or 'videoId' in url:
return [url]
matches = re.findall(
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query_str, query, referer=referer)
- else:
+ elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
+ else:
+ raise ExtractorError(
+ 'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
+ expected=True)
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
req.add_header('Referer', referer)
webpage = self._download_webpage(req, video_id)
+ error_msg = self._html_search_regex(
+ r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
+ 'error message', default=None)
+ if error_msg is not None:
+ raise ExtractorError(
+ 'brightcove said: %s' % error_msg, expected=True)
+
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
}, {
'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
'info_dict': {
- 'id': 'P9gjWjelt6iP',
+ 'id': 'WWF_5KqY3PK1',
'ext': 'flv',
'title': 'Live on Letterman - St. Vincent',
'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
'id': '85523671',
'ext': 'mp4',
'title': 'The Sunday Times - Icons',
- 'description': 'md5:a5f7ff82e2f7a9ed77473fe666954e84',
+ 'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
'uploader': 'Us',
'uploader_id': 'usfilms',
'upload_date': '20140131'
webpage = self._download_webpage(url, display_id)
video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
- mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
+ mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
if not mobj:
raise ExtractorError('Can\'t extract embed url and video id')
playerdata_url = mobj.group('embed_url')
video_description = self._html_search_regex(
r'<div class="entry-content">(?P<description>.+?)</div>',
webpage, 'description', flags=re.DOTALL, fatal=False)
+ video_thumbnail = self._og_search_thumbnail(webpage)
playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
- video_thumbnail = self._search_regex(
- r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
- sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
- videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')
+ vidurl = self._search_regex(
+ r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')
+ vidid = self._search_regex(
+ r'\'vidid\'\s*:\s*"([^\']+)"', playerdata, 'vidid')
+ videoserver = self._html_search_regex(
+ r"'videoserver'\s*:\s*'([^']+)'", playerdata, 'videoserver')
+
+ videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
formats = []
- baseurl = sd_url[:sd_url.rfind('/')+1]
+ baseurl = vidurl[:vidurl.rfind('/')+1]
for video in videolist.findall('.//video'):
src = video.get('src')
if not src:
from __future__ import unicode_literals
+import json
import re
from .common import InfoExtractor
-translation_table = {
+_translation_table = {
'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
'y': 'l', 'z': 'i',
}
+def _decode(s):
+ return ''.join(_translation_table.get(c, c) for c in s)
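+ # e.g. _decode('aqqv') == 'http'; characters absent from the table are
+ # passed through unchanged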
+
+
class CliphunterIE(InfoExtractor):
IE_NAME = 'cliphunter'
'''
_TEST = {
'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
- 'file': '1012420.flv',
- 'md5': '15e7740f30428abf70f4223478dc1225',
+ 'md5': 'a2ba71eebf523859fe527a61018f723e',
'info_dict': {
+ 'id': '1012420',
+ 'ext': 'mp4',
'title': 'Fun Jynx Maze solo',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'age_limit': 18,
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
+ video_title = self._search_regex(
+ r'mediaTitle = "([^"]+)"', webpage, 'title')
+
pl_fiji = self._search_regex(
r'pl_fiji = \'([^\']+)\'', webpage, 'video data')
pl_c_qual = self._search_regex(
r'pl_c_qual = "(.)"', webpage, 'video quality')
- video_title = self._search_regex(
- r'mediaTitle = "([^"]+)"', webpage, 'title')
-
- video_url = ''.join(translation_table.get(c, c) for c in pl_fiji)
-
+ video_url = _decode(pl_fiji)
formats = [{
'url': video_url,
- 'format_id': pl_c_qual,
+ 'format_id': 'default-%s' % pl_c_qual,
}]
+ qualities_json = self._search_regex(
+ r'var pl_qualities\s*=\s*(.*?);\n', webpage, 'quality info')
+ qualities_data = json.loads(qualities_json)
+
+ for i, t in enumerate(
+ re.findall(r"pl_fiji_([a-z0-9]+)\s*=\s*'([^']+')", webpage)):
+ quality_id, crypted_url = t
+ video_url = _decode(crypted_url)
+ f = {
+ 'format_id': quality_id,
+ 'url': video_url,
+ 'quality': i,
+ }
+ if quality_id in qualities_data:
+ qd = qualities_data[quality_id]
+ m = re.match(
+ r'''(?x)<b>(?P<width>[0-9]+)x(?P<height>[0-9]+)<\\/b>
+ \s*\(\s*(?P<tbr>[0-9]+)\s*kb\\/s''', qd)
+ if m:
+ f['width'] = int(m.group('width'))
+ f['height'] = int(m.group('height'))
+ f['tbr'] = int(m.group('tbr'))
+ formats.append(f)
+ self._sort_formats(formats)
+
+ thumbnail = self._search_regex(
+ r"var\s+mov_thumb\s*=\s*'([^']+)';",
+ webpage, 'thumbnail', fatal=False)
+
return {
'id': video_id,
'title': video_title,
'formats': formats,
+ 'age_limit': self._rta_search(webpage),
+ 'thumbnail': thumbnail,
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_parse_qs,
+ compat_urllib_parse,
+ remove_end,
+ HEADRequest,
+ compat_HTTPError,
+)
+
+
+class CloudyIE(InfoExtractor):
+ IE_DESC = 'cloudy.ec and videoraj.ch'
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?(?P<host>cloudy\.ec|videoraj\.ch)/
+ (?:v/|embed\.php\?id=)
+ (?P<id>[A-Za-z0-9]+)
+ '''
+ _EMBED_URL = 'http://www.%s/embed.php?id=%s'
+ _API_URL = 'http://www.%s/api/player.api.php?%s'
+ _MAX_TRIES = 2
+ _TESTS = [
+ {
+ 'url': 'https://www.cloudy.ec/v/af511e2527aac',
+ 'md5': '5cb253ace826a42f35b4740539bedf07',
+ 'info_dict': {
+ 'id': 'af511e2527aac',
+ 'ext': 'flv',
+ 'title': 'Funny Cats and Animals Compilation june 2013',
+ }
+ },
+ {
+ 'url': 'http://www.videoraj.ch/v/47f399fd8bb60',
+ 'md5': '7d0f8799d91efd4eda26587421c3c3b0',
+ 'info_dict': {
+ 'id': '47f399fd8bb60',
+ 'ext': 'flv',
+ 'title': 'Burning a New iPhone 5 with Gasoline - Will it Survive?',
+ }
+ }
+ ]
+
+ def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):
+
+ if try_num > self._MAX_TRIES - 1:
+ raise ExtractorError('Unable to extract video URL', expected=True)
+
+ form = {
+ 'file': video_id,
+ 'key': file_key,
+ }
+
+ if error_url:
+ form.update({
+ 'numOfErrors': try_num,
+ 'errorCode': '404',
+ 'errorUrl': error_url,
+ })
+
+ data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+ player_data = self._download_webpage(
+ data_url, video_id, 'Downloading player data')
+ data = compat_parse_qs(player_data)
+
+ try_num += 1
+
+ if 'error' in data:
+ raise ExtractorError(
+ '%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
+ expected=True)
+
+ title = data.get('title', [None])[0]
+ if title:
+ title = remove_end(title, '&asdasdas').strip()
+
+ video_url = data.get('url', [None])[0]
+
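+ # The API may return stale URLs; verify the URL with a HEAD request
+ # below and retry (up to _MAX_TRIES) with error feedback on 404/410.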
+ if video_url:
+ try:
+ self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
+ self.report_warning('Invalid video URL, requesting another', video_id)
+ return self._extract_video(video_host, video_id, file_key, video_url, try_num)
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_host = mobj.group('host')
+ video_id = mobj.group('id')
+
+ url = self._EMBED_URL % (video_host, video_id)
+ webpage = self._download_webpage(url, video_id)
+
+ file_key = self._search_regex(
+ r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
+
+ return self._extract_video(video_host, video_id, file_key)
class CNNIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
- (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
+ (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn(-ap)?|(?=&)))'''
_TESTS = [{
'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
- (?:(?:guests/[^/]+|videos|video-playlists|special-editions)/[^/]+/(?P<videotitle>[^/?#]+))
+ (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
- _TEST = {
+ _TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
- }
+ }, {
+ 'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
+ 'only_matching': True,
+ }]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
+
+ 'format_note': 'HTTP 400 at the moment (patches welcome!)',
+ 'preference': -100,
})
formats.append({
'format_id': 'rtmp-%s' % format,
+from __future__ import unicode_literals
+
import base64
+import datetime
import hashlib
import json
import netrc
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
+ compat_urlparse,
compat_str,
clean_html,
compiled_regex_type,
ExtractorError,
+ float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
+ * fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
+ * source_preference Order number for this video source
+ (quality takes higher priority)
+ -1 for default (order by other properties),
+ -2 or smaller for less than default.
+ * http_referer HTTP Referer header value to set.
+ * http_method HTTP method to use for the download.
+ * http_headers A dictionary of additional HTTP headers
+ to add to the request.
+ * http_post_data Additional data to send with a POST
+ request.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
- location: Physical location of the video.
+ location: Physical location where the video was filmed.
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
duration: Length of the video in seconds, as an integer.
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
+ is_live: True, False, or None (=unknown). Whether this video is a
+ live stream rather than a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
+ Unless mentioned otherwise, None is equivalent to absence of information.
+
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
+ @classmethod
+ def _match_id(cls, url):
+ if '_VALID_URL_RE' not in cls.__dict__:
+ cls._VALID_URL_RE = re.compile(cls._VALID_URL)
+ m = cls._VALID_URL_RE.match(url)
+ assert m
+ return m.group('id')
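+ # Minimal usage sketch (hypothetical extractor): given a _VALID_URL with
+ # a named 'id' group, subclasses can drop the re.match boilerplate:
+ #
+ #     def _real_extract(self, url):
+ #         video_id = self._match_id(url)
+ #         webpage = self._download_webpage(url, video_id)
+ #         ...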
+
@classmethod
def working(cls):
"""Getter method for _WORKING."""
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
- self.to_screen(u'%s' % (note,))
+ self.to_screen('%s' % (note,))
else:
- self.to_screen(u'%s: %s' % (video_id, note))
+ self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
- errnote = u'Unable to download webpage'
- errmsg = u'%s: %s' % (errnote, compat_str(err))
+ errnote = 'Unable to download webpage'
+ errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns a tuple (page content as string, URL handle) """
-
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
if urlh is False:
assert not fatal
return False
+ content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
+ return (content, urlh)
+
+ def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
- self.to_screen(u'Dumping request to ' + url)
+ self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
- h = u'___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
+ h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
- self.to_screen(u'Saving request to ' + filename)
+ self.to_screen('Saving request to ' + filename)
+ # Working around MAX_PATH limitation on Windows (see
+ # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
+ if os.name == 'nt':
+ absfilepath = os.path.abspath(filename)
+ if len(absfilepath) > 259:
+ filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
- if (u'<title>Access to this site is blocked</title>' in content and
- u'Websense' in content[:512]):
- msg = u'Access to this webpage has been blocked by Websense filtering software in your network.'
+ if ('<title>Access to this site is blocked</title>' in content and
+ 'Websense' in content[:512]):
+ msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
- u'Websense information URL', default=None)
+ 'Websense information URL', default=None)
if blocked_iframe:
- msg += u' Visit %s for more details' % blocked_iframe
+ msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
- return (content, urlh)
+ return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the data of the page as a string """
return content
def _download_xml(self, url_or_request, video_id,
- note=u'Downloading XML', errnote=u'Unable to download XML',
+ note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
- note=u'Downloading JSON metadata',
- errnote=u'Unable to download JSON metadata',
+ note='Downloading JSON metadata',
+ errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True):
json_string = self._download_webpage(
try:
return json.loads(json_string)
except ValueError as ve:
- raise ExtractorError('Failed to download JSON', cause=ve)
+ errmsg = '%s: Failed to parse JSON ' % video_id
+ if fatal:
+ raise ExtractorError(errmsg, cause=ve)
+ else:
+ self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
- idstr = u'' if video_id is None else u'%s: ' % video_id
+ idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
- u'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+ '[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
- self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+ self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
- self.to_screen(u'%s: Extracting information' % id_or_name)
+ self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self.to_screen(u'%s: Downloading webpage' % video_id)
+ self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
- self.to_screen(u'Confirming age')
+ self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
- self.to_screen(u'Logging in')
+ self.to_screen('Logging in')
# Methods for following #608
@staticmethod
break
if os.name != 'nt' and sys.stderr.isatty():
- _name = u'\033[0;34m%s\033[0m' % name
+ _name = '\033[0;34m%s\033[0m' % name
else:
_name = name
elif default is not _NO_DEFAULT:
return default
elif fatal:
- raise RegexNotFoundError(u'Unable to extract %s' % _name)
+ raise RegexNotFoundError('Unable to extract %s' % _name)
else:
- self._downloader.report_warning(u'unable to extract %s; '
- u'please report this issue on http://yt-dl.org/bug' % _name)
+ self._downloader.report_warning('unable to extract %s; '
+ 'please report this issue on http://yt-dl.org/bug' % _name)
return None
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
+ def _get_tfa_info(self):
+ """
+ Get the two-factor authentication info.
+ TODO: asking the user will be required for SMS/phone verification;
+ currently this just uses the command line option.
+ If there's no info available, return None.
+ """
+ if self._downloader is None:
+ return None
+ downloader_params = self._downloader.params
+
+ if downloader_params.get('twofactor', None) is not None:
+ return downloader_params['twofactor']
+
+ return None
+
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
- return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
+ return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
- regexes = self._og_regexes('video')
- if secure: regexes = self._og_regexes('video:secure_url') + regexes
+ regexes = self._og_regexes('video') + self._og_regexes('video:url')
+ if secure:
+ regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
def _sort_formats(self, formats):
if not formats:
- raise ExtractorError(u'No video formats found')
+ raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
- ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus']
+ ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
- ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a']
+ ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
- ORDER = [u'flv', u'mp4', u'webm']
+ ORDER = ['flv', 'mp4', 'webm']
else:
- ORDER = [u'webm', u'flv', u'mp4']
+ ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
+ f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
+ f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id'),
)
formats.sort(key=_formats_key)
def http_scheme(self):
- """ Either "https:" or "https:", depending on the user's preferences """
+ """ Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
- msg_template = u'%(video_id)s: Waiting for %(timeout)s seconds'
+ msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
'Unable to download f4m manifest')
formats = []
- for media_el in manifest.findall('{http://ns.adobe.com/f4m/1.0}media'):
+ media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
+ for i, media_el in enumerate(media_nodes):
+ tbr = int_or_none(media_el.attrib.get('bitrate'))
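+ # Fall back to the list index when the manifest does not advertise a bitrate.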
+ format_id = 'f4m-%d' % (i if tbr is None else tbr)
formats.append({
+ 'format_id': format_id,
'url': manifest_url,
'ext': 'flv',
- 'tbr': int_or_none(media_el.attrib.get('bitrate')),
+ 'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
})
return formats
+ def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
+ entry_protocol='m3u8', preference=None):
+
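+ # Expose the master playlist itself as a low-preference "meta" format,
+ # so the quality-selection URL still shows up in the format list.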
+ formats = [{
+ 'format_id': 'm3u8-meta',
+ 'url': m3u8_url,
+ 'ext': ext,
+ 'protocol': 'm3u8',
+ 'preference': -1,
+ 'resolution': 'multiple',
+ 'format_note': 'Quality selection URL',
+ }]
+
+ format_url = lambda u: (
+ u
+ if re.match(r'^https?://', u)
+ else compat_urlparse.urljoin(m3u8_url, u))
+
+ m3u8_doc = self._download_webpage(
+ m3u8_url, video_id,
+ note='Downloading m3u8 information',
+ errnote='Failed to download m3u8 information')
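+ # An #EXT-X-STREAM-INF line describes the variant URL on the following
+ # non-comment line, so its attributes are remembered until that URL appears.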
+ last_info = None
+ kv_rex = re.compile(
+ r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
+ for line in m3u8_doc.splitlines():
+ if line.startswith('#EXT-X-STREAM-INF:'):
+ last_info = {}
+ for m in kv_rex.finditer(line):
+ v = m.group('val')
+ if v.startswith('"'):
+ v = v[1:-1]
+ last_info[m.group('key')] = v
+ elif line.startswith('#') or not line.strip():
+ continue
+ else:
+ if last_info is None:
+ formats.append({'url': format_url(line)})
+ continue
+ tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
+
+ f = {
+ 'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
+ 'url': format_url(line.strip()),
+ 'tbr': tbr,
+ 'ext': ext,
+ 'protocol': entry_protocol,
+ 'preference': preference,
+ }
+ codecs = last_info.get('CODECS')
+ if codecs:
+ # TODO: the video codec does not necessarily come first
+ va_codecs = codecs.split(',')
+ if va_codecs[0]:
+ f['vcodec'] = va_codecs[0].partition('.')[0]
+ if len(va_codecs) > 1 and va_codecs[1]:
+ f['acodec'] = va_codecs[1].partition('.')[0]
+ resolution = last_info.get('RESOLUTION')
+ if resolution:
+ width_str, height_str = resolution.split('x')
+ f['width'] = int(width_str)
+ f['height'] = int(height_str)
+ formats.append(f)
+ last_info = {}
+ self._sort_formats(formats)
+ return formats
+
+ def _live_title(self, name):
+ """ Generate the title for a live video """
+ now = datetime.datetime.now()
+ now_str = now.strftime("%Y-%m-%d %H:%M")
+ return name + ' ' + now_str
+
+ def _int(self, v, name, fatal=False, **kwargs):
+ res = int_or_none(v, **kwargs)
+ if res is None:
+ msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+ if fatal:
+ raise ExtractorError(msg)
+ else:
+ self._downloader.report_warning(msg)
+ return res
+
+ def _float(self, v, name, fatal=False, **kwargs):
+ res = float_or_none(v, **kwargs)
+ if res is None:
+ msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+ if fatal:
+ raise ExtractorError(msg)
+ else:
+ self._downloader.report_warning(msg)
+ return res
+
class SearchInfoExtractor(InfoExtractor):
"""
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
- raise ExtractorError(u'Invalid search query "%s"' % query)
+ raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
else:
n = int(prefix)
if n <= 0:
- raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+ raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
- self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+ self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
_VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
+ EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
+
_TEST = {
'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
'md5': '1921f713ed48aabd715691f774c451f7',
import json
import base64
import zlib
+import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
-from .common import InfoExtractor
+from .subtitles import SubtitlesInfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_parse,
intlist_to_bytes,
unified_strdate,
clean_html,
+ urlencode_postdata,
)
from ..aes import (
aes_cbc_decrypt,
inc,
)
+from .common import InfoExtractor
-class CrunchyrollIE(InfoExtractor):
+class CrunchyrollIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TEST = {
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
'uploader': 'Yomiuri Telecasting Corporation (YTV)',
'upload_date': '20131013',
+ 'url': 're:(?!.*&)',
},
'params': {
# rtmp
'1080': ('80', '108'),
}
+ def _login(self):
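+ # Credentials come from the command-line options or the .netrc entry for this extractor.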
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+ self.report_login()
+ login_url = 'https://www.crunchyroll.com/?a=formhandler'
+ data = urlencode_postdata({
+ 'formname': 'RpcApiUser_Login',
+ 'name': username,
+ 'password': password,
+ })
+ login_request = compat_urllib_request.Request(login_url, data)
+ login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ self._download_webpage(login_request, None, False, 'Wrong login info')
+
+ def _real_initialize(self):
+ self._login()
+
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(data)
iv = bytes_to_intlist(iv)
decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
return zlib.decompress(decrypted_data)
- def _convert_subtitles_to_srt(self, subtitles):
+ def _convert_subtitles_to_srt(self, sub_root):
output = ''
- for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
- start = start.replace('.', ',')
- end = end.replace('.', ',')
- text = clean_html(text)
- text = text.replace('\\N', '\n')
- if not text:
- continue
+
+ for i, event in enumerate(sub_root.findall('./events/event'), 1):
+ start = event.attrib['start'].replace('.', ',')
+ end = event.attrib['end'].replace('.', ',')
+ text = event.attrib['text'].replace('\\N', '\n')
output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
return output
+ def _convert_subtitles_to_ass(self, sub_root):
+
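+ # ASS boolean style flags use '-1' for true and '0' for false.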
+ def ass_bool(strvalue):
+ assvalue = '0'
+ if strvalue == '1':
+ assvalue = '-1'
+ return assvalue
+
+ output = '[Script Info]\n'
+ output += 'Title: %s\n' % sub_root.attrib["title"]
+ output += 'ScriptType: v4.00+\n'
+ output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
+ output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
+ output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+ output += """ScaledBorderAndShadow: yes
+
+[V4+ Styles]
+Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
+"""
+ for style in sub_root.findall('./styles/style'):
+ output += 'Style: ' + style.attrib["name"]
+ output += ',' + style.attrib["font_name"]
+ output += ',' + style.attrib["font_size"]
+ output += ',' + style.attrib["primary_colour"]
+ output += ',' + style.attrib["secondary_colour"]
+ output += ',' + style.attrib["outline_colour"]
+ output += ',' + style.attrib["back_colour"]
+ output += ',' + ass_bool(style.attrib["bold"])
+ output += ',' + ass_bool(style.attrib["italic"])
+ output += ',' + ass_bool(style.attrib["underline"])
+ output += ',' + ass_bool(style.attrib["strikeout"])
+ output += ',' + style.attrib["scale_x"]
+ output += ',' + style.attrib["scale_y"]
+ output += ',' + style.attrib["spacing"]
+ output += ',' + style.attrib["angle"]
+ output += ',' + style.attrib["border_style"]
+ output += ',' + style.attrib["outline"]
+ output += ',' + style.attrib["shadow"]
+ output += ',' + style.attrib["alignment"]
+ output += ',' + style.attrib["margin_l"]
+ output += ',' + style.attrib["margin_r"]
+ output += ',' + style.attrib["margin_v"]
+ output += ',' + style.attrib["encoding"]
+ output += '\n'
+
+ output += """
+[Events]
+Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
+"""
+ for event in sub_root.findall('./events/event'):
+ output += 'Dialogue: 0'
+ output += ',' + event.attrib["start"]
+ output += ',' + event.attrib["end"]
+ output += ',' + event.attrib["style"]
+ output += ',' + event.attrib["name"]
+ output += ',' + event.attrib["margin_l"]
+ output += ',' + event.attrib["margin_r"]
+ output += ',' + event.attrib["margin_v"]
+ output += ',' + event.attrib["effect"]
+ output += ',' + event.attrib["text"]
+ output += '\n'
+
+ return output
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
- streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
- video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
- video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
+ streamdata = self._download_xml(
+ streamdata_req, video_id,
+ note='Downloading media info for %s' % video_format)
+ video_url = streamdata.find('.//host').text
+ video_play_path = streamdata.find('.//file').text
formats.append({
'url': video_url,
- 'play_path': video_play_path,
+ 'play_path': video_play_path,
'ext': 'flv',
'format': video_format,
'format_id': video_format,
})
subtitles = {}
+ sub_format = self._downloader.params.get('subtitlesformat', 'srt')
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
video_id, note='Downloading subtitles for '+sub_name)
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
- subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+ sub_root = xml.etree.ElementTree.fromstring(subtitle)
+ if not sub_root:
+ subtitles[lang_code] = ''
+ continue
+ if sub_format == 'ass':
+ subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
+ else:
+ subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)
+
+ if self._downloader.params.get('listsubtitles', False):
+ self._list_available_subtitles(video_id, subtitles)
+ return
return {
'id': video_id,
'subtitles': subtitles,
'formats': formats,
}
+
+
+class CrunchyrollShowPlaylistIE(InfoExtractor):
+ IE_NAME = "crunchyroll:playlist"
+ _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
+
+ _TESTS = [{
+ 'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+ 'info_dict': {
+ 'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
+ 'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
+ },
+ 'playlist_count': 13,
+ }]
+
+ def _real_extract(self, url):
+ show_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, show_id)
+ title = self._html_search_regex(
+ r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
+ webpage, 'title')
+ episode_paths = re.findall(
+ r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
+ webpage)
+ entries = [
+ self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
+ for ep in episode_paths
+ ]
+ entries.reverse()
+
+ return {
+ '_type': 'playlist',
+ 'id': show_id,
+ 'title': title,
+ 'entries': entries,
+ }
'title': 'International Health Care Models',
'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
}
+ }, {
+ 'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall',
+ 'info_dict': {
+ 'id': '342759',
+ 'title': 'General Motors Ignition Switch Recall',
+ },
+ 'playlist_duration_sum': 14855,
}]
def _real_extract(self, url):
+# coding: utf-8
+from __future__ import unicode_literals
+
import re
import json
import itertools
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
- IE_NAME = u'dailymotion'
+ IE_NAME = 'dailymotion'
_FORMATS = [
- (u'stream_h264_ld_url', u'ld'),
- (u'stream_h264_url', u'standard'),
- (u'stream_h264_hq_url', u'hq'),
- (u'stream_h264_hd_url', u'hd'),
- (u'stream_h264_hd1080_url', u'hd180'),
+ ('stream_h264_ld_url', 'ld'),
+ ('stream_h264_url', 'standard'),
+ ('stream_h264_hq_url', 'hq'),
+ ('stream_h264_hd_url', 'hd'),
+ ('stream_h264_hd1080_url', 'hd1080'),
]
_TESTS = [
{
- u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
- u'file': u'x33vw9.mp4',
- u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
- u'info_dict': {
- u"uploader": u"Amphora Alex and Van .",
- u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
+ 'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
+ 'md5': '392c4b85a60a90dc4792da41ce3144eb',
+ 'info_dict': {
+ 'id': 'x33vw9',
+ 'ext': 'mp4',
+ 'uploader': 'Amphora Alex and Van .',
+ 'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
}
},
# Vevo video
{
- u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
- u'file': u'USUV71301934.mp4',
- u'info_dict': {
- u'title': u'Roar (Official)',
- u'uploader': u'Katy Perry',
- u'upload_date': u'20130905',
+ 'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
+ 'info_dict': {
+ 'title': 'Roar (Official)',
+ 'id': 'USUV71301934',
+ 'ext': 'mp4',
+ 'uploader': 'Katy Perry',
+ 'upload_date': '20130905',
},
- u'params': {
- u'skip_download': True,
+ 'params': {
+ 'skip_download': True,
},
- u'skip': u'VEVO is only available in some countries',
+ 'skip': 'VEVO is only available in some countries',
},
# age-restricted video
{
- u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
- u'file': u'xyh2zz.mp4',
- u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
- u'info_dict': {
- u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
- u'uploader': 'HotWaves1012',
- u'age_limit': 18,
+ 'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
+ 'md5': '0d667a7b9cebecc3c89ee93099c4159d',
+ 'info_dict': {
+ 'id': 'xyh2zz',
+ 'ext': 'mp4',
+ 'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
+ 'uploader': 'HotWaves1012',
+ 'age_limit': 18,
}
-
}
]
def _real_extract(self, url):
- # Extract id and simplified title from URL
- mobj = re.match(self._VALID_URL, url)
-
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
url = 'http://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
- self.to_screen(u'Vevo video detected: %s' % vevo_id)
- return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
+ self.to_screen('Vevo video detected: %s' % vevo_id)
+ return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
age_limit = self._rta_search(webpage)
embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
embed_page = self._download_webpage(embed_url, video_id,
- u'Downloading embed page')
+ 'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
'height': height,
})
if not formats:
- raise ExtractorError(u'Unable to extract video URL')
+ raise ExtractorError('Unable to extract video URL')
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
self._list_available_subtitles(video_id, webpage)
return
- view_count = self._search_regex(
- r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, u'view count', fatal=False)
- if view_count is not None:
- view_count = str_to_int(view_count)
+ view_count = str_to_int(self._search_regex(
+ r'video_views_count[^>]+>\s+([\d\.,]+)',
+ webpage, 'view count', fatal=False))
+
+ title = self._og_search_title(webpage, default=None)
+ if title is None:
+ title = self._html_search_regex(
+ r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
+ 'title')
return {
- 'id': video_id,
+ 'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
- 'upload_date': video_upload_date,
- 'title': self._og_search_title(webpage),
- 'subtitles': video_subtitles,
+ 'upload_date': video_upload_date,
+ 'title': title,
+ 'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
return sub_lang_list
- self._downloader.report_warning(u'video doesn\'t have subtitles')
+ self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
- IE_NAME = u'dailymotion:playlist'
+ IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
+ _TESTS = [{
+ 'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
+ 'info_dict': {
+ 'title': 'SPORT',
+ },
+ 'playlist_mincount': 20,
+ }]
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
- id, u'Downloading page %s' % pagenum)
+ id, 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
class DailymotionUserIE(DailymotionPlaylistIE):
- IE_NAME = u'dailymotion:user'
+ IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
+ _TESTS = [{
+ 'url': 'https://www.dailymotion.com/user/nqtv',
+ 'info_dict': {
+ 'id': 'nqtv',
+ 'title': 'Rémi Gaillard',
+ },
+ 'playlist_mincount': 100,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
webpage = self._download_webpage(url, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
- webpage, u'user', flags=re.DOTALL))
+ webpage, 'user'))
return {
'_type': 'playlist',
class DaumIE(InfoExtractor):
- _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
- _TEST = {
+ _TESTS = [{
'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
'info_dict': {
'id': '52554690',
'upload_date': '20130831',
'duration': 3868,
},
- }
+ }, {
+ 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
video_id, 'Downloading video formats info')
- self.to_screen(u'%s: Getting video urls' % video_id)
formats = []
for format_el in urls.findall('result/output_list/output_list'):
profile = format_el.attrib['profile']
})
url_doc = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
- video_id, note=False)
+ video_id, note='Downloading video data for %s format' % profile)
format_url = url_doc.find('result/url').text
formats.append({
'url': format_url,
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ float_or_none,
+ int_or_none,
+ clean_html,
+)
+
+
+class DBTVIE(InfoExtractor):
+ _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
+ _TEST = {
+ 'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
+ 'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
+ 'info_dict': {
+ 'id': '33100',
+ 'display_id': 'Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
+ 'ext': 'mp4',
+ 'title': 'Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen',
+ 'description': 'md5:1504a54606c4dde3e4e61fc97aa857e0',
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'timestamp': 1404039863.438,
+ 'upload_date': '20140629',
+ 'duration': 69.544,
+ 'view_count': int,
+ 'categories': list,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ data = self._download_json(
+ 'http://api.dbtv.no/discovery/%s' % video_id, display_id)
+
+ video = data['playlist'][0]
+
+ formats = [{
+ 'url': f['URL'],
+ 'vcodec': f.get('container'),
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ 'vbr': float_or_none(f.get('rate'), 1000),
+ 'filesize': int_or_none(f.get('size')),
+ } for f in video['renditions'] if 'URL' in f]
+
+ if not formats:
+ for url_key, format_id in [('URL', 'mp4'), ('HLSURL', 'hls')]:
+ if url_key in video:
+ formats.append({
+ 'url': video[url_key],
+ 'format_id': format_id,
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video['id'],
+ 'display_id': display_id,
+ 'title': video['title'],
+ 'description': clean_html(video['desc']),
+ 'thumbnail': video.get('splash') or video.get('thumb'),
+ 'timestamp': float_or_none(video.get('publishedAt'), 1000),
+ 'duration': float_or_none(video.get('length'), 1000),
+ 'view_count': int_or_none(video.get('views')),
+ 'categories': video.get('tags'),
+ 'formats': formats,
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ orderedSet,
+)
+
+
+class DeezerPlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?deezer\.com/playlist/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.deezer.com/playlist/176747451',
+ 'info_dict': {
+ 'id': '176747451',
+ 'title': 'Best!',
+ 'uploader': 'Anonymous',
+ 'thumbnail': 're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
+ },
+ 'playlist_count': 30,
+ 'skip': 'Only available in .de',
+ }
+
+ def _real_extract(self, url):
+ if 'test' not in self._downloader.params:
+ self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
+
+ mobj = re.match(self._VALID_URL, url)
+ playlist_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, playlist_id)
+ geoblocking_msg = self._html_search_regex(
+ r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
+ default=None)
+ if geoblocking_msg is not None:
+ raise ExtractorError(
+ 'Deezer said: %s' % geoblocking_msg, expected=True)
+
+ data_json = self._search_regex(
+ r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
+ data = json.loads(data_json)
+
+ playlist_title = data.get('DATA', {}).get('TITLE')
+ playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
+ playlist_thumbnail = self._search_regex(
+ r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
+ 'playlist thumbnail')
+
+ preview_pattern = self._search_regex(
+ r"var SOUND_PREVIEW_GATEWAY\s*=\s*'([^']+)';", webpage,
+ 'preview URL pattern', fatal=False)
+ entries = []
+ for s in data['SONGS']['data']:
+ puid = s['MD5_ORIGIN']
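+ # The gateway URL pattern has {0}/{1}/{2} placeholders: the first hex digit
+ # of the MD5, the full MD5, and the media version.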
+ preview_video_url = (preview_pattern
+ .replace('{0}', puid[0])
+ .replace('{1}', puid)
+ .replace('{2}', s['MEDIA_VERSION']))
+ formats = [{
+ 'format_id': 'preview',
+ 'url': preview_video_url,
+ 'preference': -100, # Only the first 30 seconds
+ 'ext': 'mp3',
+ }]
+ self._sort_formats(formats)
+ artists = ', '.join(
+ orderedSet(a['ART_NAME'] for a in s['ARTISTS']))
+ entries.append({
+ 'id': s['SNG_ID'],
+ 'duration': int_or_none(s.get('DURATION')),
+ 'title': '%s - %s' % (artists, s['SNG_TITLE']),
+ 'uploader': s['ART_NAME'],
+ 'uploader_id': s['ART_ID'],
+ 'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
+ 'formats': formats,
+ })
+
+ return {
+ '_type': 'playlist',
+ 'id': playlist_id,
+ 'title': playlist_title,
+ 'uploader': playlist_uploader,
+ 'thumbnail': playlist_thumbnail,
+ 'entries': entries,
+ }
video_id)
video_info = player_info.find('video')
- f4m_info = self._download_xml(video_info.find('url').text, video_id)
+ f4m_info = self._download_xml(self._proto_relative_url(video_info.find('url').text.strip()), video_id)
token_el = f4m_info.find('token')
manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] + '&hdcore=3.2.0'
IE_NAME = 'divxstage'
IE_DESC = 'DivxStage'
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag)'}
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}
_HOST = 'www.divxstage.eu'
'title': 'youtubedl test video',
'description': 'This is a test video for youtubedl.',
}
- }
\ No newline at end of file
+ }
import re
from .common import InfoExtractor
-from ..utils import compat_urllib_parse_unquote
+from ..utils import compat_urllib_parse_unquote, url_basename
class DropboxIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
- _TEST = {
- 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4',
- 'md5': '8a3d905427a6951ccb9eb292f154530b',
+ _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
+ _TESTS = [{
+ 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
'info_dict': {
'id': 'nelirfsxnmcfbfh',
'ext': 'mp4',
'title': 'youtube-dl test video \'ä"BaW_jenozKc'
}
- }
+ },
+ {
+ 'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
+ 'only_matching': True,
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- fn = compat_urllib_parse_unquote(mobj.group('title'))
+ fn = compat_urllib_parse_unquote(url_basename(url))
title = os.path.splitext(fn)[0]
- video_url = url + '?dl=1'
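+ # Dropbox's dl=0 links render a preview page; strip the flag and request dl=1 for the raw file.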
+ video_url = re.sub(r'[?&]dl=0', '', url)
+ video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
return {
'id': video_id,
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import str_to_int
+
+
+class DrTuberIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?drtuber\.com/video/(?P<id>\d+)/(?P<display_id>[\w-]+)'
+ _TEST = {
+ 'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
+ 'md5': '93e680cf2536ad0dfb7e74d94a89facd',
+ 'info_dict': {
+ 'id': '1740434',
+ 'display_id': 'hot-perky-blonde-naked-golf',
+ 'ext': 'mp4',
+ 'title': 'Hot Perky Blonde Naked Golf',
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ 'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_url = self._html_search_regex(
+ r'<source src="([^"]+)"', webpage, 'video URL')
+
+ title = self._html_search_regex(
+ r'<title>([^<]+)\s*-\s*Free', webpage, 'title')
+
+ thumbnail = self._html_search_regex(
+ r'poster="([^"]+)"',
+ webpage, 'thumbnail', fatal=False)
+
+ like_count = str_to_int(self._html_search_regex(
+ r'<span id="rate_likes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
+ webpage, 'like count', fatal=False))
+ dislike_count = str_to_int(self._html_search_regex(
+ r'<span id="rate_dislikes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
+ webpage, 'dislike count', fatal=False))
+ comment_count = str_to_int(self._html_search_regex(
+ r'<span class="comments_count">([\d,\.]+)</span>',
+ webpage, 'comment count', fatal=False))
+
+ cats_str = self._search_regex(
+ r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False)
+ categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': video_url,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'like_count': like_count,
+ 'dislike_count': dislike_count,
+ 'comment_count': comment_count,
+ 'categories': categories,
+ 'age_limit': self._rta_search(webpage),
+ }
from __future__ import unicode_literals
-import re
-
from .subtitles import SubtitlesInfoExtractor
from .common import ExtractorError
from ..utils import parse_iso8601
class DRTVIE(SubtitlesInfoExtractor):
- _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/[^/]+/(?P<id>[\da-z-]+)'
+ _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'
_TEST = {
'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
programcard = self._download_json(
'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
title = data['Title']
description = data['Description']
- timestamp = parse_iso8601(data['CreatedTime'][:-5])
+ timestamp = parse_iso8601(data['CreatedTime'])
thumbnail = None
duration = None
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class DumpIE(InfoExtractor):
+ _VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
+
+ _TEST = {
+ 'url': 'http://www.dump.com/oneus/',
+ 'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
+ 'info_dict': {
+ 'id': 'oneus',
+ 'ext': 'flv',
+ 'title': "He's one of us.",
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ }
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ video_url = self._search_regex(
+ r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
+
+ thumb = self._og_search_thumbnail(webpage)
+ title = self._search_regex(r'<b>([^"]+)</b>', webpage, 'title')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ 'thumbnail': thumb,
+ }
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
-from ..utils import determine_ext
class EbaumsWorldIE(InfoExtractor):
_VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'
_TEST = {
- u'url': u'http://www.ebaumsworld.com/video/watch/83367677/',
- u'file': u'83367677.mp4',
- u'info_dict': {
- u'title': u'A Giant Python Opens The Door',
- u'description': u'This is how nightmares start...',
- u'uploader': u'jihadpizza',
+ 'url': 'http://www.ebaumsworld.com/video/watch/83367677/',
+ 'info_dict': {
+ 'id': '83367677',
+ 'ext': 'mp4',
+ 'title': 'A Giant Python Opens The Door',
+ 'description': 'This is how nightmares start...',
+ 'uploader': 'jihadpizza',
},
}
'id': video_id,
'title': config.find('title').text,
'url': video_url,
- 'ext': determine_ext(video_url),
'description': config.find('description').text,
'thumbnail': config.find('image').text,
'uploader': config.find('username').text,
+# coding: utf-8
+from __future__ import unicode_literals
+
import json
import random
import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
+ compat_str,
)
IE_NAME = '8tracks'
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
_TEST = {
- u"name": u"EightTracks",
- u"url": u"http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
- u"playlist": [
+ "name": "EightTracks",
+ "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
+ "info_dict": {
+ 'id': '1336550',
+ 'display_id': 'youtube-dl-test-tracks-a',
+ "description": "test chars: \"'/\\ä↭",
+ "title": "youtube-dl test tracks \"'/\\ä↭<>",
+ },
+ "playlist": [
{
- u"file": u"11885610.m4a",
- u"md5": u"96ce57f24389fc8734ce47f4c1abcc55",
- u"info_dict": {
- u"title": u"youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "96ce57f24389fc8734ce47f4c1abcc55",
+ "info_dict": {
+ "id": "11885610",
+ "ext": "m4a",
+ "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885608.m4a",
- u"md5": u"4ab26f05c1f7291ea460a3920be8021f",
- u"info_dict": {
- u"title": u"youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "4ab26f05c1f7291ea460a3920be8021f",
+ "info_dict": {
+ "id": "11885608",
+ "ext": "m4a",
+ "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885679.m4a",
- u"md5": u"d30b5b5f74217410f4689605c35d1fd7",
- u"info_dict": {
- u"title": u"youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "d30b5b5f74217410f4689605c35d1fd7",
+ "info_dict": {
+ "id": "11885679",
+ "ext": "m4a",
+ "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885680.m4a",
- u"md5": u"4eb0a669317cd725f6bbd336a29f923a",
- u"info_dict": {
- u"title": u"youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "4eb0a669317cd725f6bbd336a29f923a",
+ "info_dict": {
+ "id": "11885680",
+ "ext": "m4a",
+ "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885682.m4a",
- u"md5": u"1893e872e263a2705558d1d319ad19e8",
- u"info_dict": {
- u"title": u"PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "1893e872e263a2705558d1d319ad19e8",
+ "info_dict": {
+ "id": "11885682",
+ "ext": "m4a",
+ "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885683.m4a",
- u"md5": u"b673c46f47a216ab1741ae8836af5899",
- u"info_dict": {
- u"title": u"PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "b673c46f47a216ab1741ae8836af5899",
+ "info_dict": {
+ "id": "11885683",
+ "ext": "m4a",
+ "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885684.m4a",
- u"md5": u"1d74534e95df54986da7f5abf7d842b7",
- u"info_dict": {
- u"title": u"phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "1d74534e95df54986da7f5abf7d842b7",
+ "info_dict": {
+ "id": "11885684",
+ "ext": "m4a",
+ "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
},
{
- u"file": u"11885685.m4a",
- u"md5": u"f081f47af8f6ae782ed131d38b9cd1c0",
- u"info_dict": {
- u"title": u"phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
- u"uploader_id": u"ytdl"
+ "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
+ "info_dict": {
+ "id": "11885685",
+ "ext": "m4a",
+ "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+ "uploader_id": "ytdl"
}
}
]
}
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
- json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
+ json_like = self._search_regex(
+ r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information')
data = json.loads(json_like)
session = str(random.randint(0, 1000000000))
track_count = data['tracks_count']
first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
next_url = first_url
- res = []
+ entries = []
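+ # The play/next endpoints return one track per request; fetch each of the track_count songs in turn.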
for i in range(track_count):
- api_json = self._download_webpage(next_url, playlist_id,
- note=u'Downloading song information %s/%s' % (str(i+1), track_count),
- errnote=u'Failed to download song information')
+ api_json = self._download_webpage(
+ next_url, playlist_id,
+ note='Downloading song information %d/%d' % (i + 1, track_count),
+ errnote='Failed to download song information')
api_data = json.loads(api_json)
- track_data = api_data[u'set']['track']
+ track_data = api_data['set']['track']
info = {
- 'id': track_data['id'],
+ 'id': compat_str(track_data['id']),
'url': track_data['track_file_stream_url'],
'title': track_data['performer'] + u' - ' + track_data['name'],
'raw_title': track_data['name'],
'uploader_id': data['user']['login'],
'ext': 'm4a',
}
- res.append(info)
- next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
- return res
+ entries.append(info)
+ next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
+ session, mix_id, track_data['id'])
+ return {
+ '_type': 'playlist',
+ 'entries': entries,
+ 'id': compat_str(mix_id),
+ 'display_id': playlist_id,
+ 'title': data.get('name'),
+ 'description': data.get('description'),
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class EinthusanIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
+ _TESTS = [
+ {
+ 'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
+ 'md5': 'af244f4458cd667205e513d75da5b8b1',
+ 'info_dict': {
+ 'id': '2447',
+ 'ext': 'mp4',
+ 'title': 'Ek Villain',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
+ }
+ },
+ {
+ 'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
+ 'md5': 'ef63c7a803e22315880ed182c10d1c5c',
+ 'info_dict': {
+ 'id': '1671',
+ 'ext': 'mp4',
+ 'title': 'Soodhu Kavvuum',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ video_title = self._html_search_regex(
+ r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')
+
+ video_url = self._html_search_regex(
+ r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
+ webpage, 'video url')
+
+ description = self._html_search_meta('description', webpage)
+ thumbnail = self._html_search_regex(
+ r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
+ webpage, "thumbnail url", fatal=False)
+ if thumbnail is not None:
+ thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')
+
+ return {
+ 'id': video_id,
+ 'title': video_title,
+ 'url': video_url,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ }
# encoding: utf-8
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
class EitbIE(InfoExtractor):
- IE_NAME = u'eitb.tv'
+ IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
_TEST = {
- u'add_ie': ['Brightcove'],
- u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
- u'md5': u'edf4436247185adee3ea18ce64c47998',
- u'info_dict': {
- u'id': u'2743577154001',
- u'ext': u'mp4',
- u'title': u'60 minutos (Lasa y Zabala, 30 años)',
+ 'add_ie': ['Brightcove'],
+ 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
+ 'md5': 'edf4436247185adee3ea18ce64c47998',
+ 'info_dict': {
+ 'id': '2743577154001',
+ 'ext': 'mp4',
+ 'title': '60 minutos (Lasa y Zabala, 30 años)',
# All videos from eitb has this description in the brightcove info
- u'description': u'.',
- u'uploader': u'Euskal Telebista',
+ 'description': '.',
+ 'uploader': 'Euskal Telebista',
},
}
webpage = self._download_webpage(url, chapter_id)
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
if bc_url is None:
- raise ExtractorError(u'Could not extract the Brightcove url')
+ raise ExtractorError('Could not extract the Brightcove url')
# The BrightcoveExperience object doesn't contain the video id, we set
# it manually
bc_url += '&%40videoPlayer={0}'.format(chapter_id)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ parse_iso8601,
+)
+
+
+class EllenTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?ellentv\.com/videos/(?P<id>[a-z0-9_-]+)'
+ _TEST = {
+ 'url': 'http://www.ellentv.com/videos/0-7jqrsr18/',
+ 'md5': 'e4af06f3bf0d5f471921a18db5764642',
+ 'info_dict': {
+ 'id': '0-7jqrsr18',
+ 'ext': 'mp4',
+ 'title': 'What\'s Wrong with These Photos? A Whole Lot',
+ 'timestamp': 1406876400,
+ 'upload_date': '20140801',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ timestamp = parse_iso8601(self._search_regex(
+ r'<span class="publish-date"><time datetime="([^"]+)">',
+ webpage, 'timestamp'))
+
+ return {
+ 'id': video_id,
+ 'title': self._og_search_title(webpage),
+ 'url': self._html_search_meta('VideoURL', webpage, 'url'),
+ 'timestamp': timestamp,
+ }
+
+
+class EllenTVClipsIE(InfoExtractor):
+ IE_NAME = 'EllenTV:clips'
+ _VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
+ _TEST = {
+ 'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
+ 'info_dict': {
+ 'id': 'meryl-streep-vanessa-hudgens',
+ 'title': 'Meryl Streep, Vanessa Hudgens',
+ },
+ 'playlist_mincount': 9,
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ playlist_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, playlist_id)
+ playlist = self._extract_playlist(webpage)
+
+ return {
+ '_type': 'playlist',
+ 'id': playlist_id,
+ 'title': self._og_search_title(webpage),
+ 'entries': self._extract_entries(playlist)
+ }
+
+ def _extract_playlist(self, webpage):
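+ # addClips() is invoked with a JSON array of clip objects; the regex captures
+ # only the innards of the outer brackets, so re-wrap them before parsing.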
+ json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
+ try:
+ return json.loads("[{" + json_string + "}]")
+ except ValueError as ve:
+ raise ExtractorError('Failed to download JSON', cause=ve)
+
+ def _extract_entries(self, playlist):
+ return [self.url_result(item['url'], 'EllenTV') for item in playlist]
from __future__ import unicode_literals
-import re
+from .tnaflix import TNAFlixIE
-from .common import InfoExtractor
+class EMPFlixIE(TNAFlixIE):
+ _VALID_URL = r'^https?://www\.empflix\.com/videos/(?P<display_id>[0-9a-zA-Z-]+)-(?P<id>[0-9]+)\.html'
+
+ _TITLE_REGEX = r'name="title" value="(?P<title>[^"]*)"'
+ _DESCRIPTION_REGEX = r'name="description" value="([^"]*)"'
+ _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
-class EmpflixIE(InfoExtractor):
- _VALID_URL = r'^https?://www\.empflix\.com/videos/.*?-(?P<id>[0-9]+)\.html'
_TEST = {
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
'md5': 'b1bc15b6412d33902d6e5952035fcabc',
'info_dict': {
'id': '33051',
+ 'display_id': 'Amateur-Finger-Fuck',
'ext': 'mp4',
'title': 'Amateur Finger Fuck',
'description': 'Amateur solo finger fucking.',
+ 'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- webpage = self._download_webpage(url, video_id)
- age_limit = self._rta_search(webpage)
-
- video_title = self._html_search_regex(
- r'name="title" value="(?P<title>[^"]*)"', webpage, 'title')
- video_description = self._html_search_regex(
- r'name="description" value="([^"]*)"', webpage, 'description', fatal=False)
-
- cfg_url = self._html_search_regex(
- r'flashvars\.config = escape\("([^"]+)"',
- webpage, 'flashvars.config')
-
- cfg_xml = self._download_xml(
- cfg_url, video_id, note='Downloading metadata')
-
- formats = [
- {
- 'url': item.find('videoLink').text,
- 'format_id': item.find('res').text,
- } for item in cfg_xml.findall('./quality/item')
- ]
-
- return {
- 'id': video_id,
- 'title': video_title,
- 'description': video_description,
- 'formats': formats,
- 'age_limit': age_limit,
- }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ str_to_int,
+)
+
+
+class EpornerIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
+ _TEST = {
+ 'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
+ 'md5': '39d486f046212d8e1b911c52ab4691f8',
+ 'info_dict': {
+ 'id': '95008',
+ 'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
+ 'ext': 'mp4',
+ 'title': 'Infamous Tiffany Teen Strip Tease Video',
+ 'duration': 194,
+ 'view_count': int,
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+ title = self._html_search_regex(
+ r'<title>(.*?) - EPORNER', webpage, 'title')
+
+ redirect_code = self._html_search_regex(
+ r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
+ webpage, 'redirect_code')
+ redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
+ player_code = self._download_webpage(
+ redirect_url, display_id, note='Downloading player config')
+
+ sources = self._search_regex(
+ r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', player_code, 'sources')
+
+ formats = []
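+ # Each source entry looks like file: "<url>", label: "<height>p"; the
+ # label's leading digits give the frame height.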
+ for video_url, format_id in re.findall(r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
+ fmt = {
+ 'url': video_url,
+ 'format_id': format_id,
+ }
+ m = re.search(r'^(\d+)', format_id)
+ if m:
+ fmt['height'] = int(m.group(1))
+ formats.append(fmt)
+ self._sort_formats(formats)
+
+ duration = parse_duration(self._search_regex(
+ r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
+ fatal=False))
+ view_count = str_to_int(self._search_regex(
+ r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
+ webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'formats': formats,
+ 'age_limit': self._rta_search(webpage),
+ }
r'<meta name="description" content="([^"]*)"',
webpage, 'description', fatal=False)
- playerUrl = self._og_search_video_url(webpage, name=u'player URL')
+ playerUrl = self._og_search_video_url(webpage, name='player URL')
title = self._html_search_regex(
r'<meta name="title" content="([^"]*)"',
class EveryonesMixtapeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
- _TEST = {
+ _TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
- 'file': '5bfseWNmlds.mp4',
"info_dict": {
+ 'id': '5bfseWNmlds',
+ 'ext': 'mp4',
"title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
"uploader": "FKR.TV",
"uploader_id": "frenchkissrecords",
'params': {
'skip_download': True, # This is simply YouTube
}
- }
+ }, {
+ 'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
+ 'info_dict': {
+ 'id': 'm7m0jJAbMQi',
+ 'title': 'Driving',
+ },
+ 'playlist_count': 24
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ unified_strdate,
+)
+
+
+class ExpoTVIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
+ _TEST = {
+ 'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561',
+ 'md5': '2985e6d7a392b2f7a05e0ca350fe41d0',
+ 'info_dict': {
+ 'id': '17561',
+ 'ext': 'mp4',
+ 'upload_date': '20060212',
+ 'title': 'My Favorite Online Scrapbook Store',
+ 'view_count': int,
+ 'description': 'You\'ll find most everything you need at this virtual store front.',
+ 'uploader': 'Anna T.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ player_key = self._search_regex(
+ r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
+ config_url = 'http://client.expotv.com/video/config/%s/%s' % (
+ video_id, player_key)
+ config = self._download_json(
+ config_url, video_id,
+ note='Downloading video configuration')
+
+ formats = [{
+ 'url': fcfg['file'],
+ 'height': int_or_none(fcfg.get('height')),
+ 'format_note': fcfg.get('label'),
+ 'ext': self._search_regex(
+ r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
+ 'file extension', default=None),
+ } for fcfg in config['sources']]
+ self._sort_formats(formats)
+
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage)
+ thumbnail = config.get('image')
+ view_count = int_or_none(self._search_regex(
+ r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view count'))
+ uploader = self._search_regex(
+ r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
+ fatal=False)
+ upload_date = unified_strdate(self._search_regex(
+ r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
+ fatal=False))
+
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'title': title,
+ 'description': description,
+ 'view_count': view_count,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ }
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
+ str_to_int,
)
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
+ 'view_count': int,
'age_limit': 18,
}
}, {
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
- r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
- fatal=False)
+ r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
+ webpage, 'uploader', fatal=False)
+ view_count = str_to_int(self._html_search_regex(
+ r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
+ webpage, 'view count', fatal=False))
+
video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
'id': video_id,
'title': video_title,
'uploader': uploader,
+ 'view_count': view_count,
'url': video_url,
'format': format,
'format_id': format,
compat_urllib_parse,
compat_urllib_request,
urlencode_postdata,
-
ExtractorError,
+ limit_length,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:\w+\.)?facebook\.com/
- (?:[^#?]*\#!/)?
- (?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
+ (?:[^#]*?\#!/)?
+ (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
(?:v|video_id)=(?P<id>[0-9]+)
(?:.*)'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
- _TEST = {
- 'url': 'https://www.facebook.com/photo.php?v=120708114770723',
- 'md5': '48975a41ccc4b7a581abd68651c1a5a8',
+ _TESTS = [{
+ 'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
+ 'md5': '6a40d33c0eccbb1af76cf0485a052659',
+ 'info_dict': {
+ 'id': '637842556329505',
+ 'ext': 'mp4',
+ 'duration': 38,
+ 'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
+ }
+ }, {
+ 'note': 'Video without discernible title',
+ 'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
- 'id': '120708114770723',
+ 'id': '274175099429670',
'ext': 'mp4',
- 'duration': 279,
- 'title': 'PEOPLE ARE AWESOME 2013',
+ 'title': 'Facebook video #274175099429670',
}
- }
+ }, {
+ 'url': 'https://www.facebook.com/video.php?v=10204634152394104',
+ 'only_matching': True,
+ }]
def _login(self):
(useremail, password) = self._get_login_info()
check_form = {
'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
- 'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'),
+ 'h': self._search_regex(
+ r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
'name_action_selected': 'dont_save',
}
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
raise ExtractorError('Cannot find video URL')
video_title = self._html_search_regex(
- r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title')
+ r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
+ fatal=False)
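+ # Fall back to the photo-page caption, and finally to a generic placeholder title.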
+ if not video_title:
+ video_title = self._html_search_regex(
+ r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
+ webpage, 'alternative title', default=None)
+ video_title = limit_length(video_title, 80)
+ if not video_title:
+ video_title = 'Facebook video #%s' % video_id
return {
'id': video_id,
# encoding: utf-8
-import re
+from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- determine_ext,
-)
class FazIE(InfoExtractor):
- IE_NAME = u'faz.net'
+ IE_NAME = 'faz.net'
_VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'
_TEST = {
- u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
- u'file': u'12610585.mp4',
- u'info_dict': {
- u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
- u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
+ 'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
+ 'info_dict': {
+ 'id': '12610585',
+ 'ext': 'mp4',
+ 'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
+ 'description': 'md5:1453fbf9a0d041d985a47306192ea253',
},
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- self.to_screen(video_id)
+ video_id = self._match_id(url)
+
webpage = self._download_webpage(url, video_id)
- config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
- u'config xml url')
- config = self._download_xml(config_xml_url, video_id,
- u'Downloading config xml')
+ config_xml_url = self._search_regex(
+ r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+ config = self._download_xml(
+ config_xml_url, video_id, 'Downloading config xml')
encodings = config.find('ENCODINGS')
formats = []
- for code in ['LOW', 'HIGH', 'HQ']:
+ for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
encoding = encodings.find(code)
if encoding is None:
continue
encoding_url = encoding.find('FILENAME').text
formats.append({
'url': encoding_url,
- 'ext': determine_ext(encoding_url),
'format_id': code.lower(),
+ 'quality': pref,
})
+ self._sort_formats(formats)
- descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
+ descr = self._html_search_regex(
+ r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
return {
'id': video_id,
'title': self._og_search_title(webpage),
fields = dict(re.findall(r'''(?x)<input\s+
type="hidden"\s+
name="([^"]+)"\s+
- (?:id="[^"]+"\s+)?
value="([^"]*)"
''', webpage))
ext = self._search_regex(r'type:\s?\'([^\']+)\',',
webpage, 'extension', fatal=False)
video_url = self._search_regex(
- r'file:\s?\'(http[^\']+)\',', webpage, 'file url')
+ r'file:\s?loadURL\(\'(http[^\']+)\'\),', webpage, 'file url')
formats = [{
'format_id': 'sd',
+from __future__ import unicode_literals
+
import re
import random
import json
from .common import InfoExtractor
from ..utils import (
- determine_ext,
get_element_by_id,
clean_html,
)
class FKTVIE(InfoExtractor):
- IE_NAME = u'fernsehkritik.tv'
- _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
+ IE_NAME = 'fernsehkritik.tv'
+ _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
_TEST = {
- u'url': u'http://fernsehkritik.tv/folge-1',
- u'file': u'00011.flv',
- u'info_dict': {
- u'title': u'Folge 1 vom 10. April 2007',
- u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
+ 'url': 'http://fernsehkritik.tv/folge-1',
+ 'info_dict': {
+ 'id': '00011',
+ 'ext': 'flv',
+ 'title': 'Folge 1 vom 10. April 2007',
+ 'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
},
}
start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
episode)
playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
- u'playlist', flags=re.DOTALL)
+ 'playlist', flags=re.DOTALL)
files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
# TODO: return a single multipart video
videos = []
videos.append({
'id': video_id,
'url': video_url,
- 'ext': determine_ext(video_url),
'title': clean_html(get_element_by_id('eptitle', start_webpage)),
'description': clean_html(get_element_by_id('contentlist', start_webpage)),
'thumbnail': video_thumbnail
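
The playlist literal extracted above is JavaScript rather than strict JSON, so the extractor blanks out the innermost {...} groups before parsing. A quick worked example with made-up data:

    import json
    import re

    playlist = '[{"file": "a.flv", "meta": {"x": 1}}, {"file": "b.flv"}]'
    # The non-greedy pattern '{[^{}]*?}' replaces innermost brace pairs:
    stripped = re.sub('{[^{}]*?}', '{}', playlist)
    # stripped == '[{"file": "a.flv", "meta": {}}, {}]'
    print(len(json.loads(stripped)))  # -> 2 entries
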
class FKTVPosteckeIE(InfoExtractor):
- IE_NAME = u'fernsehkritik.tv:postecke'
- _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
+ IE_NAME = 'fernsehkritik.tv:postecke'
+ _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
_TEST = {
- u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
- u'file': u'0120.flv',
- u'md5': u'262f0adbac80317412f7e57b4808e5c4',
- u'info_dict': {
- u"title": u"Postecke 120"
+ 'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
+ 'md5': '262f0adbac80317412f7e57b4808e5c4',
+ 'info_dict': {
+ 'id': '0120',
+ 'ext': 'flv',
+ 'title': 'Postecke 120',
}
}
video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
video_title = 'Postecke %d' % episode
return {
- 'id': video_id,
- 'url': video_url,
- 'ext': determine_ext(video_url),
- 'title': video_title,
+ 'id': video_id,
+ 'url': video_url,
+ 'title': video_title,
}
class FlickrIE(InfoExtractor):
- """Information Extractor for Flickr videos"""
- _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
+ _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
_TEST = {
'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
- 'file': '5645318632.mp4',
'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
'info_dict': {
+ 'id': '5645318632',
+ 'ext': 'mp4',
"description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
"uploader_id": "forestwander-nature-pictures",
"title": "Dark Hollow Waterfalls"
raise ExtractorError('Unable to extract video url')
video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
- return [{
- 'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
- 'title': self._og_search_title(webpage),
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
'uploader_id': video_uploader_id,
- }]
+ }
import re
from .common import InfoExtractor
+from ..utils import int_or_none
class FranceInterIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
+ _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
- 'file': '793962.mp3',
'md5': '4764932e466e6f6c79c317d2e74f6884',
"info_dict": {
- "title": "L’Histoire dans les jeux vidéo",
+ 'id': '793962',
+ 'ext': 'mp3',
+ 'title': 'L’Histoire dans les jeux vidéo',
+ 'description': 'md5:7e93ddb4451e7530022792240a3049c7',
+ 'timestamp': 1387369800,
+ 'upload_date': '20131218',
},
}
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
- title = self._html_search_regex(
- r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
+
path = self._search_regex(
- r'&urlAOD=(.*?)&startTime', webpage, 'video url')
+ r'<a id="player".+?href="([^"]+)"', webpage, 'video url')
video_url = 'http://www.franceinter.fr/' + path
+ title = self._html_search_regex(
+ r'<span class="title">(.+?)</span>', webpage, 'title')
+ description = self._html_search_regex(
+ r'<span class="description">(.*?)</span>',
+ webpage, 'description', fatal=False)
+ timestamp = int_or_none(self._search_regex(
+ r'data-date="(\d+)"', webpage, 'upload date', fatal=False))
+
return {
'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
'formats': [{
'url': video_url,
'vcodec': 'none',
}],
- 'title': title,
}
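
The data-date attribute is read as a Unix timestamp; the upload_date in the test above is what youtube-dl derives from that timestamp (UTC assumed). A quick consistency check:

    import datetime

    # 1387369800 is the timestamp expected by the test case above
    print(datetime.datetime.utcfromtimestamp(1387369800).strftime('%Y%m%d'))
    # -> 20131218, matching the expected upload_date
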
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
+ ExtractorError,
+ clean_html,
+ parse_duration,
+ compat_urllib_parse_urlparse,
+ int_or_none,
)
class FranceTVBaseInfoExtractor(InfoExtractor):
- def _extract_video(self, video_id):
- info = self._download_xml(
- 'http://www.francetvinfo.fr/appftv/webservices/video/'
- 'getInfosOeuvre.php?id-diffusion='
- + video_id, video_id, 'Downloading XML config')
-
- manifest_url = info.find('videos/video/url').text
- manifest_url = manifest_url.replace('/z/', '/i/')
-
- if manifest_url.startswith('rtmp'):
- formats = [{'url': manifest_url, 'ext': 'flv'}]
- else:
- formats = []
- available_formats = self._search_regex(r'/[^,]*,(.*?),k\.mp4', manifest_url, 'available formats')
- for index, format_descr in enumerate(available_formats.split(',')):
- format_info = {
- 'url': manifest_url.replace('manifest.f4m', 'index_%d_av.m3u8' % index),
- 'ext': 'mp4',
- }
- m_resolution = re.search(r'(?P<width>\d+)x(?P<height>\d+)', format_descr)
- if m_resolution is not None:
- format_info.update({
- 'width': int(m_resolution.group('width')),
- 'height': int(m_resolution.group('height')),
- })
- formats.append(format_info)
-
- thumbnail_path = info.find('image').text
+ def _extract_video(self, video_id, catalogue):
+ info = self._download_json(
+ 'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
+ % (video_id, catalogue),
+ video_id, 'Downloading video JSON')
+
+ if info.get('status') == 'NOK':
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
+
+ formats = []
+ for video in info['videos']:
+ if video['statut'] != 'ONLINE':
+ continue
+ video_url = video['url']
+ if not video_url:
+ continue
+ format_id = video['format']
+ if video_url.endswith('.f4m'):
+ video_url_parsed = compat_urllib_parse_urlparse(video_url)
+ f4m_url = self._download_webpage(
+ 'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
+ video_id, 'Downloading f4m manifest token', fatal=False)
+ if f4m_url:
+ f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
+ for f4m_format in f4m_formats:
+ f4m_format['preference'] = 1
+ formats.extend(f4m_formats)
+ elif video_url.endswith('.m3u8'):
+ formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
+ elif video_url.startswith('rtmp'):
+ formats.append({
+ 'url': video_url,
+ 'format_id': 'rtmp-%s' % format_id,
+ 'ext': 'flv',
+ 'preference': 1,
+ })
+ else:
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ 'preference': -1,
+ })
+ self._sort_formats(formats)
return {
'id': video_id,
- 'title': info.find('titre').text,
+ 'title': info['titre'],
+ 'description': clean_html(info['synopsis']),
+ 'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
+ 'duration': parse_duration(info['duree']),
+ 'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
- 'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
- 'description': info.find('synopsis').text,
}
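
The rewritten _extract_video favours tokenized HDS and RTMP formats (preference 1) over the plain fallback URLs (preference -1) and lets _sort_formats order the list. A minimal sketch of the intended ranking, assuming preference dominates the comparison (the real comparator in common.py weighs several fields):

    formats = [
        {'format_id': 'fallback', 'preference': -1},
        {'format_id': 'rtmp-hd', 'preference': 1},
    ]
    # The best format sorts last, as youtube-dl picks formats[-1] by default:
    formats.sort(key=lambda f: f.get('preference') or 0)
    print([f['format_id'] for f in formats])  # ['fallback', 'rtmp-hd']
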
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
- return self._extract_video(video_id)
+ return self._extract_video(video_id, 'Pluzz')
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Soir 3',
- },
- 'params': {
- 'skip_download': True,
+ 'upload_date': '20130826',
+ 'timestamp': 1377548400,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
},
'params': {
'skip_download': 'HLS (requires ffmpeg)'
- }
+ },
+ 'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
- video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
- return self._extract_video(video_id)
+ video_id, catalogue = self._search_regex(
+ r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
+ return self._extract_video(video_id, catalogue)
class FranceTVIE(FranceTVBaseInfoExtractor):
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
- 'file': '75540104.mp4',
+ 'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
- 'title': '13h15, le samedi...',
- 'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d',
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'id': '109169362',
+ 'ext': 'flv',
+ 'title': '13h15, le dimanche...',
+ 'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
+ 'upload_date': '20140914',
+ 'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
+ 'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
+ 'upload_date': '20131113',
+ 'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
+ 'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
+ 'upload_date': '20131106',
+ 'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
+ 'md5': '78f0f4064f9074438e660785bbf2c5d9',
'info_dict': {
- 'id': '92837968',
- 'ext': 'mp4',
+ 'id': '108961659',
+ 'ext': 'flv',
'title': 'C à dire ?!',
- 'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
+ 'upload_date': '20140915',
+ 'timestamp': 1410795000,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
+ 'md5': '52f0bfe202848b15915a2f39aaa8981b',
'info_dict': {
- 'id': '92327925',
- 'ext': 'mp4',
- 'title': 'Infô-Afrique',
+ 'id': '108634970',
+ 'ext': 'flv',
+ 'title': 'Infô Afrique',
'description': 'md5:ebf346da789428841bee0fd2a935ea55',
+ 'upload_date': '20140915',
+ 'timestamp': 1410822000,
},
- 'params': {
- # m3u8 download
- 'skip_download': True,
- },
- 'skip': 'The id changes frequently',
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj.group('key'):
- webpage = self._download_webpage(url, mobj.group('key'))
- id_res = [
- (r'''(?x)<div\s+class="video-player">\s*
- <a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+
- class="francetv-video-player">'''),
- (r'<a id="player_direct" href="http://info\.francetelevisions'
- '\.fr/\?id-video=([^"/&]+)'),
- (r'<a class="video" id="ftv_player_(.+?)"'),
- ]
- video_id = self._html_search_regex(id_res, webpage, 'video ID')
- else:
- video_id = mobj.group('id')
- return self._extract_video(video_id)
+ webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
+ video_id, catalogue = self._html_search_regex(
+ r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
+ webpage, 'video ID').split('@')
+ return self._extract_video(video_id, catalogue)
class GenerationQuoiIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
_TEST = {
- 'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
+ 'url': 'http://culturebox.francetvinfo.fr/festivals/dans-les-jardins-de-william-christie/dans-les-jardins-de-william-christie-le-camus-162553',
+ 'md5': '5ad6dec1ffb2a3fbcb20cc4b744be8d6',
'info_dict': {
- 'id': 'EV_6785',
- 'ext': 'mp4',
- 'title': 'Einstein on the beach au Théâtre du Châtelet',
- 'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'id': 'EV_22853',
+ 'ext': 'flv',
+ 'title': 'Dans les jardins de William Christie - Le Camus',
+ 'description': 'md5:4710c82315c40f0c865ca8b9a68b5299',
+ 'upload_date': '20140829',
+ 'timestamp': 1409317200,
},
}
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
- video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, 'video id')
- return self._extract_video(video_id)
+ video_id, catalogue = self._search_regex(
+ r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
+
+ return self._extract_video(video_id, catalogue)
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
- 'md5': 'ff4d83318f89776ed0250634cfaa8d36',
+ 'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
- links = re.findall(r'<source src="([^"]+/v)\d+\.([^"]+)" type=\'video', webpage)
+ links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
if not links:
raise ExtractorError('No media links available for %s' % video_id)
'age_limit': age_limit,
'timestamp': timestamp,
}
+
+
+class GameOnePlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?gameone\.de(?:/tv)?/?$'
+ IE_NAME = 'gameone:playlist'
+ _TEST = {
+ 'url': 'http://www.gameone.de/tv',
+ 'info_dict': {
+ 'title': 'GameOne',
+ },
+ 'playlist_mincount': 294,
+ }
+
+ def _real_extract(self, url):
+ webpage = self._download_webpage('http://www.gameone.de/tv', 'TV')
+ max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
+ entries = [
+ self.url_result('http://www.gameone.de/tv/%d' % video_id, 'GameOne')
+ for video_id in range(max_id, 0, -1)]
+
+ return {
+ '_type': 'playlist',
+ 'title': 'GameOne',
+ 'entries': entries,
+ }
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
- compat_urllib_error,
compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
+ determine_ext,
ExtractorError,
+ float_or_none,
HEADRequest,
+ orderedSet,
parse_xml,
smuggle_url,
unescapeHTML,
unified_strdate,
+ unsmuggle_url,
url_basename,
)
from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .smotri import SmotriIE
+from .condenast import CondeNastIE
class GenericIE(InfoExtractor):
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
- 'md5': '7cf780be104d40fea7bae52eed4a470e',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
- 'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
+ 'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
- 'id': '981',
+ 'id': '1969',
'ext': 'mp4',
- 'title': 'My web playroom',
- 'uploader': 'Ze Frank',
- 'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
+ 'title': 'Hidden miracles of the natural world',
+ 'uploader': 'Louie Schwartzberg',
+ 'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embedded Ustream video
'skip_download': 'Requires rtmpdump'
}
},
- # smotri embed
- {
- 'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
- 'md5': 'ec40048448e9284c9a1de77bb188108b',
- 'info_dict': {
- 'id': 'v27008541fad',
- 'ext': 'mp4',
- 'title': 'Крым и Севастополь вошли в состав России',
- 'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
- 'duration': 900,
- 'upload_date': '20140318',
- 'uploader': 'rbctv_2012_4',
- 'uploader_id': 'rbctv_2012_4',
- },
- },
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'description': 'Mario\'s life in the fast lane has never looked so good.',
},
},
+ # YouTube embed via <data-embed-url="">
+ {
+ 'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
+ 'info_dict': {
+ 'id': '4vAffPZIT44',
+ 'ext': 'mp4',
+ 'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
+ 'uploader': 'Gameloft',
+ 'uploader_id': 'gameloft',
+ 'upload_date': '20140828',
+ 'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ },
+ # Camtasia Studio
+ {
+ 'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
+ 'playlist': [{
+ 'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
+ 'info_dict': {
+ 'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
+ 'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
+ 'ext': 'flv',
+ 'duration': 2235.90,
+ }
+ }, {
+ 'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
+ 'info_dict': {
+ 'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
+ 'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
+ 'ext': 'flv',
+ 'duration': 2235.93,
+ }
+ }],
+ 'info_dict': {
+ 'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
+ }
+ },
+ # Flowplayer
+ {
+ 'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
+ 'md5': '9d65602bf31c6e20014319c7d07fba27',
+ 'info_dict': {
+ 'id': '5123ea6d5e5a7',
+ 'ext': 'mp4',
+ 'age_limit': 18,
+ 'uploader': 'www.handjobhub.com',
+ 'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
+ }
+ },
+ # RSS feed
+ {
+ 'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
+ 'info_dict': {
+ 'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
+ 'title': 'Zero Punctuation',
+ 'description': 're:'
+ },
+ 'playlist_mincount': 11,
+ },
+ # Multiple brightcove videos
+ # https://github.com/rg3/youtube-dl/issues/2283
+ {
+ 'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
+ 'info_dict': {
+ 'id': 'always-never',
+ 'title': 'Always / Never - The New Yorker',
+ },
+ 'playlist_count': 3,
+ 'params': {
+ 'extract_flat': False,
+ 'skip_download': True,
+ }
+ },
+ # MLB embed
+ {
+ 'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
+ 'md5': '96f09a37e44da40dd083e12d9a683327',
+ 'info_dict': {
+ 'id': '33322633',
+ 'ext': 'mp4',
+ 'title': 'Ump changes call to ball',
+ 'description': 'md5:71c11215384298a172a6dcb4c2e20685',
+ 'duration': 48,
+ 'timestamp': 1401537900,
+ 'upload_date': '20140531',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ },
+ # Wistia embed
+ {
+ 'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
+ 'md5': '8788b683c777a5cf25621eaf286d0c23',
+ 'info_dict': {
+ 'id': '1cfaf6b7ea',
+ 'ext': 'mov',
+ 'title': 'md5:51364a8d3d009997ba99656004b5e20d',
+ 'duration': 643.0,
+ 'filesize': 182808282,
+ 'uploader': 'education-portal.com',
+ },
+ },
+ {
+ 'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
+ 'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
+ 'info_dict': {
+ 'id': 'uxjb0lwrcz',
+ 'ext': 'mp4',
+ 'title': 'Conversation about Hexagonal Rails Part 1 - ThoughtWorks',
+ 'duration': 1715.0,
+ 'uploader': 'thoughtworks.wistia.com',
+ },
+ },
+ # Direct download with broken HEAD
+ {
+ 'url': 'http://ai-radio.org:8000/radio.opus',
+ 'info_dict': {
+ 'id': 'radio',
+ 'ext': 'opus',
+ 'title': 'radio',
+ },
+ 'params': {
+ 'skip_download': True, # infinite live stream
+ },
+ 'expected_warnings': [
+ r'501.*Not Implemented'
+ ],
+ },
+ # Soundcloud embed
+ {
+ 'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
+ 'info_dict': {
+ 'id': '174391317',
+ 'ext': 'mp3',
+ 'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
+ 'uploader': 'Sophos Security',
+ 'title': 'Chet Chat 171 - Oct 29, 2014',
+ 'upload_date': '20141029',
+ }
+ }
]
- def report_download_webpage(self, video_id):
- """Report webpage download."""
- if not self._downloader.params.get('test', False):
- self._downloader.report_warning('Falling back on generic information extractor.')
- super(GenericIE, self).report_download_webpage(video_id)
-
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
- def _send_head(self, url):
- """Check if it is a redirect, like url shorteners, in case return the new url."""
-
- class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
- """
- Subclass the HTTPRedirectHandler to make it use our
- HEADRequest also on the redirected URL
- """
- def redirect_request(self, req, fp, code, msg, headers, newurl):
- if code in (301, 302, 303, 307):
- newurl = newurl.replace(' ', '%20')
- newheaders = dict((k,v) for k,v in req.headers.items()
- if k.lower() not in ("content-length", "content-type"))
- try:
- # This function was deprecated in python 3.3 and removed in 3.4
- origin_req_host = req.get_origin_req_host()
- except AttributeError:
- origin_req_host = req.origin_req_host
- return HEADRequest(newurl,
- headers=newheaders,
- origin_req_host=origin_req_host,
- unverifiable=True)
- else:
- raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
- class HTTPMethodFallback(compat_urllib_request.BaseHandler):
- """
- Fallback to GET if HEAD is not allowed (405 HTTP error)
- """
- def http_error_405(self, req, fp, code, msg, headers):
- fp.read()
- fp.close()
-
- newheaders = dict((k,v) for k,v in req.headers.items()
- if k.lower() not in ("content-length", "content-type"))
- return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
- headers=newheaders,
- origin_req_host=req.get_origin_req_host(),
- unverifiable=True))
-
- # Build our opener
- opener = compat_urllib_request.OpenerDirector()
- for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
- HTTPMethodFallback, HEADRedirectHandler,
- compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
- opener.add_handler(handler())
-
- response = opener.open(HEADRequest(url))
- if response is None:
- raise ExtractorError('Invalid URL protocol')
- return response
-
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
'entries': entries,
}
+ def _extract_camtasia(self, url, video_id, webpage):
+ """ Returns None if no camtasia video can be found. """
+
+ camtasia_cfg = self._search_regex(
+ r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
+ webpage, 'camtasia configuration file', default=None)
+ if camtasia_cfg is None:
+ return None
+
+ title = self._html_search_meta('DC.title', webpage, fatal=True)
+
+ camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
+ camtasia_cfg = self._download_xml(
+ camtasia_url, video_id,
+ note='Downloading camtasia configuration',
+ errnote='Failed to download camtasia configuration')
+ fileset_node = camtasia_cfg.find('./playlist/array/fileset')
+
+ entries = []
+ for n in fileset_node.getchildren():
+ url_n = n.find('./uri')
+ if url_n is None:
+ continue
+
+ entries.append({
+ 'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
+ 'title': '%s - %s' % (title, n.tag),
+ 'url': compat_urlparse.urljoin(url, url_n.text),
+ 'duration': float_or_none(n.find('./duration').text),
+ })
+
+ return {
+ '_type': 'playlist',
+ 'entries': entries,
+ 'title': title,
+ }
+
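
For orientation, a csConfigFile shaped to satisfy the XPath walk above would look roughly like the following; the schema is inferred from the code, not from Camtasia documentation:

    import xml.etree.ElementTree as ET

    # Hypothetical config matching find('./playlist/array/fileset'):
    doc = ET.fromstring(
        '<config><playlist><array><fileset>'
        '<video><uri>lecture_video.flv</uri><duration>2235.90</duration></video>'
        '<pip><uri>lecture_pip.flv</uri><duration>2235.93</duration></pip>'
        '</fileset></array></playlist></config>')
    for n in doc.find('./playlist/array/fileset'):
        print(n.tag, n.find('./uri').text, float(n.find('./duration').text))
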
def _real_extract(self, url):
if url.startswith('//'):
return {
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
) % (url, url), expected=True)
else:
- assert ':' in default_search
+ if ':' not in default_search:
+ default_search += ':'
return self.url_result(default_search + url)
- video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
+
+ url, smuggled_data = unsmuggle_url(url)
+ force_videoid = None
+ is_intentional = smuggled_data and smuggled_data.get('to_generic')
+ if smuggled_data and 'force_videoid' in smuggled_data:
+ force_videoid = smuggled_data['force_videoid']
+ video_id = force_videoid
+ else:
+ video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
self.to_screen('%s: Requesting header' % video_id)
- try:
- response = self._send_head(url)
+ head_req = HEADRequest(url)
+ head_response = self._request_webpage(
+ head_req, video_id,
+ note=False, errnote='Could not send HEAD request to %s' % url,
+ fatal=False)
+ if head_response is not False:
# Check for redirect
- new_url = response.geturl()
+ new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
+ if force_videoid:
+ new_url = smuggle_url(
+ new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
- # Check for direct link to a video
- content_type = response.headers.get('Content-Type', '')
- m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
- if m:
- upload_date = response.headers.get('Last-Modified')
- if upload_date:
- upload_date = unified_strdate(upload_date)
- return {
- 'id': video_id,
- 'title': os.path.splitext(url_basename(url))[0],
- 'formats': [{
- 'format_id': m.group('format_id'),
- 'url': url,
- 'vcodec': 'none' if m.group('type') == 'audio' else None
- }],
- 'upload_date': upload_date,
- }
+ full_response = None
+ if head_response is False:
+ full_response = self._request_webpage(url, video_id)
+ head_response = full_response
+
+ # Check for direct link to a video
+ content_type = head_response.headers.get('Content-Type', '')
+ m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+ if m:
+ upload_date = unified_strdate(
+ head_response.headers.get('Last-Modified'))
+ return {
+ 'id': video_id,
+ 'title': os.path.splitext(url_basename(url))[0],
+ 'formats': [{
+ 'format_id': m.group('format_id'),
+ 'url': url,
+ 'vcodec': 'none' if m.group('type') == 'audio' else None
+ }],
+ 'upload_date': upload_date,
+ }
- except compat_urllib_error.HTTPError:
- # This may be a stupid server that doesn't like HEAD, our UA, or so
- pass
+ if not self._downloader.params.get('test', False) and not is_intentional:
+ self._downloader.report_warning('Falling back on generic information extractor.')
- try:
+ if full_response:
+ webpage = self._webpage_read_content(full_response, url, video_id)
+ else:
webpage = self._download_webpage(url, video_id)
- except ValueError:
- # since this is the last-resort InfoExtractor, if
- # this error is thrown, it'll be thrown here
- raise ExtractorError('Failed to download URL: %s' % url)
-
self.report_extraction(video_id)
# Is it an RSS feed?
except compat_xml_parse_error:
pass
+ # Is it a Camtasia project?
+ camtasia_res = self._extract_camtasia(url, video_id, webpage)
+ if camtasia_res is not None:
+ return camtasia_res
+
# Sometimes embedded video player is hidden behind percent encoding
# (e.g. https://github.com/rg3/youtube-dl/issues/2448)
# Unescaping the whole page allows those cases to be handled in a generic way
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
+ # Try to detect age limit automatically
+ age_limit = self._rta_search(webpage)
+ # And then there are the jokers who advertise that they use RTA,
+ # but actually don't.
+ AGE_LIMIT_MARKERS = [
+ r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
+ ]
+ if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
+ age_limit = 18
+
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
+ # Helper method
+ def _playlist_from_matches(matches, getter, ie=None):
+ urlrs = orderedSet(
+ self.url_result(self._proto_relative_url(getter(m)), ie)
+ for m in matches)
+ return self.playlist_result(
+ urlrs, playlist_id=video_id, playlist_title=video_title)
+
# Look for BrightCove:
bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
if bc_urls:
if mobj:
player_url = unescapeHTML(mobj.group('url'))
surl = smuggle_url(player_url, {'Referer': url})
- return self.url_result(surl, 'Vimeo')
+ return self.url_result(surl)
# Look for embedded (swf embed) Vimeo player
mobj = re.search(
- r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
+ r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
if mobj:
- return self.url_result(mobj.group(1), 'Vimeo')
+ return self.url_result(mobj.group(1))
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
<iframe[^>]+?src=|
+ data-video-url=|
<embed[^>]+?src=|
- embedSWF\(?:\s*
+ embedSWF\(?:\s*|
+ new\s+SWFObject\(
)
(["\'])
- (?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
- (?:embed|v)/.+?)
+ (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
+ (?:embed|v|p)/.+?)
\1''', webpage)
if matches:
- urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
- for tuppl in matches]
- return self.playlist_result(
- urlrs, playlist_id=video_id, playlist_title=video_title)
+ return _playlist_from_matches(
+ matches, lambda m: unescapeHTML(m[1]))
# Look for embedded Dailymotion player
matches = re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
if matches:
- urlrs = [self.url_result(unescapeHTML(tuppl[1]))
- for tuppl in matches]
- return self.playlist_result(
- urlrs, playlist_id=video_id, playlist_title=video_title)
+ return _playlist_from_matches(
+ matches, lambda m: unescapeHTML(m[1]))
+
+ # Look for embedded Dailymotion playlist player (#3822)
+ m = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
+ if m:
+ playlists = re.findall(
+ r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
+ if playlists:
+ return _playlist_from_matches(
+ playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
- r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
+ r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
+ embed_url = self._proto_relative_url(
+ unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
- 'url': unescapeHTML(match.group('url')),
+ 'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': video_id,
}
+
+ match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
+ if match:
+ return {
+ '_type': 'url_transparent',
+ 'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
+ 'ie_key': 'Wistia',
+ 'uploader': video_uploader,
+ 'title': video_title,
+ 'id': match.group('id')
+ }
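
The second Wistia branch catches inline embeds rather than iframes. Illustrative snippets the regex is written to match (assumed markup, not taken from a specific site):

    import re

    WISTIA_INLINE = r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)'
    for snippet in (
            '<div id="wistia_uxjb0lwrcz" class="wistia_embed">',
            '<div data-wistia-id="uxjb0lwrcz">',
            'Wistia.embed("uxjb0lwrcz", {});'):
        print(re.search(WISTIA_INLINE, snippet).group('id'))  # uxjb0lwrcz each time
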
# Look for embedded blip.tv player
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
if mobj:
return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
- mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
+ mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
if mobj:
return self.url_result(mobj.group(1), 'BlipTV')
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
- urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie')
- for eurl in matches]
- return self.playlist_result(
- urlrs, playlist_id=video_id, playlist_title=video_title)
+ return _playlist_from_matches(
+ matches, getter=unescapeHTML, ie='FunnyOrDie')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
# Look for embedded soundcloud player
mobj = re.search(
- r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
+ r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
url = unescapeHTML(mobj.group('url'))
return self.url_result(url, ie='MTVServicesEmbedded')
+ # Look for embedded yahoo player
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
+ webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'), 'Yahoo')
+
+ # Look for embedded sbs.com.au player
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:www\.)sbs\.com\.au/ondemand/video/single/.+?)\1',
+ webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'), 'SBS')
+
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>https?://m\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
+ webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'), 'MLB')
+
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
+ webpage)
+ if mobj is not None:
+ return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
+
+ def check_video(vurl):
+ vpath = compat_urlparse.urlparse(vurl).path
+ vext = determine_ext(vpath)
+ return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
+
+ def filter_video(urls):
+ return list(filter(check_video, urls))
+
# Start with something easy: JW Player in SWFObject
- found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+ found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
- found = re.findall(r'''(?sx)
+ found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
- .*?file\s*:\s*["\'](.*?)["\']''', webpage)
+ .*?file\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
- found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+ found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
- found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
+ found = filter_video(re.findall(
+ r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
+ if not found:
+ # Flow player
+ found = filter_video(re.findall(r'''(?xs)
+ flowplayer\("[^"]+",\s*
+ \{[^}]+?\}\s*,
+ \s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
+ ["']?url["']?\s*:\s*["']([^"']+)["']
+ ''', webpage))
if not found:
# Try to find twitter cards info
- found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+ found = filter_video(re.findall(
+ r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
if m_video_type is not None:
- found = re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
+ found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
- found = re.findall(r'(?s)<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage)
+ found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src="([^"]+)"', webpage)
if not found:
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
- r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
+ r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'?([^\'"]+)',
webpage)
if found:
new_url = found.group(1)
'url': video_url,
'uploader': video_uploader,
'title': video_title,
+ 'age_limit': age_limit,
})
if len(entries) == 1:
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class GlideIE(InfoExtractor):
+ IE_DESC = 'Glide mobile video messages (glide.me)'
+ _VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
+ _TEST = {
+ 'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
+ 'md5': '4466372687352851af2d131cfaa8a4c7',
+ 'info_dict': {
+ 'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
+ 'ext': 'mp4',
+ 'title': 'Damon Timm\'s Glide message',
+ 'thumbnail': 're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ title = self._html_search_regex(
+ r'<title>(.*?)</title>', webpage, 'title')
+ video_url = self.http_scheme() + self._search_regex(
+ r'<source src="(.*?)" type="video/mp4">', webpage, 'video URL')
+ thumbnail_url = self._search_regex(
+ r'<img id="video-thumbnail" src="(.*?)"',
+ webpage, 'thumbnail url', fatal=False)
+ thumbnail = (
+ None if thumbnail_url is None
+ else self.http_scheme() + thumbnail_url)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ 'thumbnail': thumbnail,
+ }
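
http_scheme() comes from the InfoExtractor base class; the sketch below shows the assumed behaviour (https unless the user asked for --prefer-insecure), which is why the page's scheme-less source URLs need the prefix at all:

    def http_scheme(prefer_insecure=False):
        # Sketch of the assumed InfoExtractor.http_scheme() contract:
        # pick a scheme for protocol-relative URLs.
        return 'http:' if prefer_insecure else 'https:'

    # http_scheme() + '//cdn.example.invalid/video.mp4'
    #   -> 'https://cdn.example.invalid/video.mp4'
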
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import random
+import math
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ compat_str,
+ compat_chr,
+ compat_ord,
+)
+
+
+class GloboIE(InfoExtractor):
+ _VALID_URL = r'https?://.+?\.globo\.com/(?P<id>.+)'
+
+ _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
+ _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
+
+ _VIDEOID_REGEXES = [
+ r'\bdata-video-id="(\d+)"',
+ r'\bdata-player-videosids="(\d+)"',
+ r'<div[^>]+\bid="(\d+)"',
+ ]
+
+ _RESIGN_EXPIRATION = 86400
+
+ _TESTS = [
+ {
+ 'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
+ 'md5': '03ebf41cb7ade43581608b7d9b71fab0',
+ 'info_dict': {
+ 'id': '3654973',
+ 'ext': 'mp4',
+ 'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
+ 'duration': 251.585,
+ 'uploader': 'SporTV',
+ 'uploader_id': 698,
+ 'like_count': int,
+ }
+ },
+ {
+ 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
+ 'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
+ 'info_dict': {
+ 'id': '3607726',
+ 'ext': 'mp4',
+ 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
+ 'duration': 103.204,
+ 'uploader': 'Globo.com',
+ 'uploader_id': 265,
+ 'like_count': int,
+ }
+ },
+ {
+ 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
+ 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
+ 'info_dict': {
+ 'id': '3652183',
+ 'ext': 'mp4',
+ 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
+ 'duration': 110.711,
+ 'uploader': 'Rede Globo',
+ 'uploader_id': 196,
+ 'like_count': int,
+ }
+ },
+ ]
+
+ class MD5():
+ HEX_FORMAT_LOWERCASE = 0
+ HEX_FORMAT_UPPERCASE = 1
+ BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
+ BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
+ PADDING = '=0xFF01DD'
+ hexcase = 0
+ b64pad = ''
+
+ def __init__(self):
+ pass
+
+ class JSArray(list):
+ def __getitem__(self, y):
+ try:
+ return list.__getitem__(self, y)
+ except IndexError:
+ return 0
+
+ def __setitem__(self, i, y):
+ try:
+ return list.__setitem__(self, i, y)
+ except IndexError:
+ self.extend([0] * (i - len(self) + 1))
+ self[-1] = y
+
+ @classmethod
+ def hex_md5(cls, param1):
+ return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
+
+ @classmethod
+ def b64_md5(cls, param1, param2=None):
+ return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
+
+ @classmethod
+ def any_md5(cls, param1, param2):
+ return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
+
+ @classmethod
+ def rstr_md5(cls, param1):
+ return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
+
+ @classmethod
+ def rstr2hex(cls, param1):
+ _loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
+ _loc_3 = ''
+ for _loc_5 in range(0, len(param1)):
+ _loc_4 = compat_ord(param1[_loc_5])
+ _loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
+ return _loc_3
+
+ @classmethod
+ def rstr2b64(cls, param1):
+ _loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
+ _loc_3 = ''
+ _loc_4 = len(param1)
+ for _loc_5 in range(0, _loc_4, 3):
+ _loc_6_1 = compat_ord(param1[_loc_5]) << 16
+ _loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
+ _loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
+ _loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
+ for _loc_7 in range(0, 4):
+ if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
+ _loc_3 += cls.b64pad
+ else:
+ _loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
+ return _loc_3
+
+ @staticmethod
+ def rstr2any(param1, param2):
+ _loc_3 = len(param2)
+ _loc_4 = []
+ _loc_9 = [0] * ((len(param1) >> 2) + 1)
+ for _loc_5 in range(0, len(_loc_9)):
+ _loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
+
+ while len(_loc_9) > 0:
+ _loc_8 = []
+ _loc_7 = 0
+ for _loc_5 in range(0, len(_loc_9)):
+ _loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
+ _loc_6 = int(math.floor(_loc_7 / _loc_3))
+ _loc_7 -= _loc_6 * _loc_3
+ if len(_loc_8) > 0 or _loc_6 > 0:
+ _loc_8.append(_loc_6)
+
+ _loc_4.append(_loc_7)
+ _loc_9 = _loc_8
+
+ _loc_10 = ''
+ _loc_5 = len(_loc_4) - 1
+ while _loc_5 >= 0:
+ _loc_10 += param2[_loc_4[_loc_5]]
+ _loc_5 -= 1
+
+ return _loc_10
+
+ @classmethod
+ def str2rstr_utf8(cls, param1, param2=None):
+ _loc_3 = ''
+ _loc_4 = -1
+ if not param2:
+ param2 = cls.PADDING
+ param1 = param1 + param2[1:9]
+ while True:
+ _loc_4 += 1
+ if _loc_4 >= len(param1):
+ break
+ _loc_5 = compat_ord(param1[_loc_4])
+ _loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
+ if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
+ _loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
+ _loc_4 += 1
+ if _loc_5 <= 127:
+ _loc_3 += compat_chr(_loc_5)
+ continue
+ if _loc_5 <= 2047:
+ _loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
+ continue
+ if _loc_5 <= 65535:
+ _loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
+ 128 | _loc_5 & 63)
+ continue
+ if _loc_5 <= 2097151:
+ _loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
+ 128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
+ return _loc_3
+
+ @staticmethod
+ def rstr2binl(param1):
+ _loc_2 = [0] * ((len(param1) >> 2) + 1)
+ for _loc_3 in range(0, len(_loc_2)):
+ _loc_2[_loc_3] = 0
+ for _loc_3 in range(0, len(param1) * 8, 8):
+ _loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
+ return _loc_2
+
+ @staticmethod
+ def binl2rstr(param1):
+ _loc_2 = ''
+ for _loc_3 in range(0, len(param1) * 32, 8):
+ _loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
+ return _loc_2
+
+ @classmethod
+ def binl_md5(cls, param1, param2):
+ param1 = cls.JSArray(param1)
+ param1[param2 >> 5] |= 128 << param2 % 32
+ param1[(param2 + 64 >> 9 << 4) + 14] = param2
+ _loc_3 = 1732584193
+ _loc_4 = -271733879
+ _loc_5 = -1732584194
+ _loc_6 = 271733878
+ for _loc_7 in range(0, len(param1), 16):
+ _loc_8 = _loc_3
+ _loc_9 = _loc_4
+ _loc_10 = _loc_5
+ _loc_11 = _loc_6
+ _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
+ _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
+ _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
+ _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
+ _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
+ _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
+ _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
+ _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
+ _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
+ _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
+ _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
+ _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
+ _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
+ _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
+ _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
+ _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
+ _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
+ _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
+ _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
+ _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
+ _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
+ _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
+ _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
+ _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
+ _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
+ _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
+ _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
+ _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
+ _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
+ _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
+ _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
+ _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
+ _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
+ _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
+ _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
+ _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
+ _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
+ _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
+ _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
+ _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
+ _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
+ _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
+ _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
+ _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
+ _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
+ _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
+ _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
+ _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
+ _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
+ _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
+ _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
+ _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
+ _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
+ _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
+ _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
+ _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
+ _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
+ _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
+ _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
+ _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
+ _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
+ _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
+ _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
+ _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
+ _loc_3 = cls.safe_add(_loc_3, _loc_8)
+ _loc_4 = cls.safe_add(_loc_4, _loc_9)
+ _loc_5 = cls.safe_add(_loc_5, _loc_10)
+ _loc_6 = cls.safe_add(_loc_6, _loc_11)
+ return [_loc_3, _loc_4, _loc_5, _loc_6]
+
+ @classmethod
+ def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
+ return cls.safe_add(
+ cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)
+
+ @classmethod
+ def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
+ return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)
+
+ @classmethod
+ def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
+ return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)
+
+ @classmethod
+ def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
+ return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)
+
+ @classmethod
+ def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
+ return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)
+
+ @classmethod
+ def safe_add(cls, param1, param2):
+ _loc_3 = (param1 & 65535) + (param2 & 65535)
+ _loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
+ return cls.lshift(_loc_4, 16) | _loc_3 & 65535
+
+ @classmethod
+ def bit_rol(cls, param1, param2):
+ return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)
+
+ @staticmethod
+ def lshift(value, count):
+ r = (0xFFFFFFFF & value) << count
+ return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+ video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
+
+ video = self._download_json(
+ self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
+
+ title = video['title']
+ duration = float_or_none(video['duration'], 1000)
+ like_count = video['likes']
+ uploader = video['channel']
+ uploader_id = video['channel_id']
+
+ formats = []
+
+ for resource in video['resources']:
+ resource_id = resource.get('_id')
+ if not resource_id:
+ continue
+
+ security = self._download_json(
+ self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
+ video_id, 'Downloading security hash for %s' % resource_id)
+
+ security_hash = security.get('hash')
+ if not security_hash:
+ message = security.get('message')
+ if message:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, message), expected=True)
+ continue
+
+ hash_code = security_hash[:2]
+ received_time = int(security_hash[2:12])
+ received_random = security_hash[12:22]
+ received_md5 = security_hash[22:]
+
+ sign_time = received_time + self._RESIGN_EXPIRATION
+ padding = '%010d' % random.randint(1, 10000000000)
+
+ signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
+ signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
+
+ formats.append({
+ 'url': '%s?h=%s&k=%s' % (resource['url'], signed_hash, 'flash'),
+ 'format_id': resource_id,
+ 'height': resource['height']
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'duration': duration,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'like_count': like_count,
+ 'formats': formats
+ }
\ No newline at end of file
'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
video_id, 'Downloading player config XML')
- video_url = config.find('.//file').text
- uploader = config.find('.//author').text
- timestamp = parse_iso8601(config.find('.//date').text)
- duration = parse_duration(config.find('.//duration').text)
- thumbnail = config.find('.//image').text
+ video_url = config.find('file').text
+ uploader = config.find('author').text
+ timestamp = parse_iso8601(config.find('date').text)
+ duration = parse_duration(config.find('duration').text)
+ thumbnail = config.find('image').text
media = self._download_xml(
'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
- title = media.find('.//title').text
+ title = media.find('title').text
return {
'id': video_id,
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urlparse,
+ determine_ext,
+)
+
+
+class GolemIE(InfoExtractor):
+ _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
+ _TEST = {
+ 'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
+ 'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
+ 'info_dict': {
+ 'id': '14095',
+ 'format_id': 'high',
+ 'ext': 'mp4',
+ 'title': 'iPhone 6 und 6 Plus - Test',
+ 'duration': 300.44,
+ 'filesize': 65309548,
+ }
+ }
+
+ _PREFIX = 'http://video.golem.de'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ config = self._download_xml(
+ 'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
+
+ info = {
+ 'id': video_id,
+ 'title': config.findtext('./title', 'golem'),
+ 'duration': self._float(config.findtext('./playtime'), 'duration'),
+ }
+
+ formats = []
+ for e in config:
+ url = e.findtext('./url')
+ if not url:
+ continue
+
+ formats.append({
+ 'format_id': e.tag,
+ 'url': compat_urlparse.urljoin(self._PREFIX, url),
+ 'height': self._int(e.get('height'), 'height'),
+ 'width': self._int(e.get('width'), 'width'),
+ 'filesize': self._int(e.findtext('filesize'), 'filesize'),
+ 'ext': determine_ext(e.findtext('./filename')),
+ })
+ self._sort_formats(formats)
+ info['formats'] = formats
+
+ thumbnails = []
+ for e in config.findall('.//teaser'):
+ url = e.findtext('./url')
+ if not url:
+ continue
+ thumbnails.append({
+ 'url': compat_urlparse.urljoin(self._PREFIX, url),
+ 'width': self._int(e.get('width'), 'thumbnail width'),
+ 'height': self._int(e.get('height'), 'thumbnail height'),
+ })
+ info['thumbnails'] = thumbnails
+
+ return info
# coding: utf-8
from __future__ import unicode_literals
-import datetime
import re
+import codecs
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
+from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
'info_dict': {
'id': 'ZButuJc6CtH',
'ext': 'flv',
+ 'title': '嘆きの天使 降臨',
'upload_date': '20120613',
'uploader': '井上ヨシマサ',
- 'title': '嘆きの天使 降臨',
}
}
def _real_extract(self, url):
- # Extract id from URL
- mobj = re.match(self._VALID_URL, url)
-
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
# Step 1, Retrieve post webpage to extract further information
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
- self.report_extraction(video_id)
-
- # Extract update date
- upload_date = self._html_search_regex(
+ title = self._og_search_description(webpage).splitlines()[0]
+ upload_date = unified_strdate(self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
- webpage, 'upload date', fatal=False, flags=re.VERBOSE)
- if upload_date:
- # Convert timestring to a format suitable for filename
- upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
- upload_date = upload_date.strftime('%Y%m%d')
-
- # Extract uploader
- uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
- webpage, 'uploader', fatal=False)
-
- # Extract title
- # Get the first line for title
- video_title = self._og_search_description(webpage).splitlines()[0]
+ webpage, 'upload date', fatal=False, flags=re.VERBOSE))
+ uploader = self._html_search_regex(
+ r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
- video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
+ video_page = self._search_regex(
+ r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
- # Extract video links all sizes
- pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
- mobj = re.findall(pattern, webpage)
- if len(mobj) == 0:
- raise ExtractorError('Unable to extract video links')
-
- # Sort in resolution
- links = sorted(mobj)
+ def unicode_escape(s):
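+            # decode literal \uXXXX escapes embedded in the page's JS string literals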
+ decoder = codecs.getdecoder('unicode_escape')
+ return re.sub(
+ r'\\u[0-9a-fA-F]{4,}',
+ lambda m: decoder(m.group(0))[0],
+ s)
- # Choose the lowest of the sort, i.e. highest resolution
- video_url = links[-1]
- # Only get the url. The resolution part in the tuple has no use anymore
- video_url = video_url[-1]
- # Treat escaped \u0026 style hex
- try:
- video_url = video_url.decode("unicode_escape")
- except AttributeError: # Python 3
- video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+ # Extract video links all sizes
+ formats = [{
+ 'url': unicode_escape(video_url),
+ 'ext': 'flv',
+ 'width': int(width),
+ 'height': int(height),
+ } for width, height, video_url in re.findall(
+ r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
+ self._sort_formats(formats)
return {
'id': video_id,
- 'url': video_url,
+ 'title': title,
'uploader': uploader,
'upload_date': upload_date,
- 'title': video_title,
- 'ext': 'flv',
+ 'formats': formats,
}
_MAX_RESULTS = 1000
IE_NAME = 'video.google:search'
_SEARCH_KEY = 'gvsearch'
+ _TEST = {
+ 'url': 'gvsearch15:python language',
+ 'info_dict': {
+ 'id': 'python language',
+ 'title': 'python language',
+ },
+ 'playlist_count': 15,
+ }
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
from .common import InfoExtractor
from ..utils import (
+ ExtractorError,
determine_ext,
compat_urllib_parse,
compat_urllib_request,
class GorillaVidIE(InfoExtractor):
- IE_DESC = 'GorillaVid.in and daclips.in'
+ IE_DESC = 'GorillaVid.in, daclips.in and movpod.in'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
- (?:daclips\.in|gorillavid\.in))/
+ (?:daclips\.in|gorillavid\.in|movpod\.in))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
+ _FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
+
_TESTS = [{
'url': 'http://gorillavid.in/06y9juieqpmi',
'md5': '5ae4a3580620380619678ee4875893ba',
'info_dict': {
'id': '06y9juieqpmi',
'ext': 'flv',
- 'title': 'Rebecca Black My Moment Official Music Video Reaction',
+ 'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'info_dict': {
'id': '3rso4kdn6f9m',
'ext': 'mp4',
- 'title': 'Micro Pig piglets ready on 16th July 2009',
+ 'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
'thumbnail': 're:http://.*\.jpg',
- },
+ }
+ }, {
+ 'url': 'http://movpod.in/0wguyyxi1yca',
+ 'only_matching': True,
}]
def _real_extract(self, url):
webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
+ if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+ raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
fields = dict(re.findall(r'''(?x)<input\s+
type="hidden"\s+
name="([^"]+)"\s+
webpage = self._download_webpage(req, video_id, 'Downloading video page')
- title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
- thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
- url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
+ title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)</span>', webpage, 'title')
+ video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url')
+ thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False)
formats = [{
'format_id': 'sd',
- 'url': url,
- 'ext': determine_ext(url),
+ 'url': video_url,
+ 'ext': determine_ext(video_url),
'quality': 1,
}]
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import time
+import math
+import os.path
+import re
+
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    compat_html_parser,
+    compat_urllib_parse,
+    compat_urllib_request,
+    compat_urlparse,
+)
+
+
+class GroovesharkHtmlParser(compat_html_parser.HTMLParser):
+ def __init__(self):
+ self._current_object = None
+ self.objects = []
+ compat_html_parser.HTMLParser.__init__(self)
+
+ def handle_starttag(self, tag, attrs):
+ attrs = dict((k, v) for k, v in attrs)
+ if tag == 'object':
+ self._current_object = {'attrs': attrs, 'params': []}
+ elif tag == 'param':
+ self._current_object['params'].append(attrs)
+
+ def handle_endtag(self, tag):
+ if tag == 'object':
+ self.objects.append(self._current_object)
+ self._current_object = None
+
+ @classmethod
+ def extract_object_tags(cls, html):
+ p = cls()
+ p.feed(html)
+ p.close()
+ return p.objects
+
+
+class GroovesharkIE(InfoExtractor):
+ _VALID_URL = r'https?://(www\.)?grooveshark\.com/#!/s/([^/]+)/([^/]+)'
+ _TEST = {
+ 'url': 'http://grooveshark.com/#!/s/Jolene+Tenth+Key+Remix+Ft+Will+Sessions/6SS1DW?src=5',
+ 'md5': '7ecf8aefa59d6b2098517e1baa530023',
+ 'info_dict': {
+ 'id': '6SS1DW',
+ 'title': 'Jolene (Tenth Key Remix ft. Will Sessions)',
+ 'ext': 'mp3',
+ 'duration': 227,
+ }
+ }
+
+ do_playerpage_request = True
+ do_bootstrap_request = True
+
+ def _parse_target(self, target):
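+        # The song path lives in the URL fragment ("#!/s/<title>/<token>");
+        # keep the path and its trailing token.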
+ uri = compat_urlparse.urlparse(target)
+ hash = uri.fragment[1:].split('?')[0]
+ token = os.path.basename(hash.rstrip('/'))
+ return (uri, hash, token)
+
+ def _build_bootstrap_url(self, target):
+ (uri, hash, token) = self._parse_target(target)
+ query = 'getCommunicationToken=1&hash=%s&%d' % (compat_urllib_parse.quote(hash, safe=''), self.ts)
+ return (compat_urlparse.urlunparse((uri.scheme, uri.netloc, '/preload.php', None, query, None)), token)
+
+ def _build_meta_url(self, target):
+ (uri, hash, token) = self._parse_target(target)
+ query = 'hash=%s&%d' % (compat_urllib_parse.quote(hash, safe=''), self.ts)
+ return (compat_urlparse.urlunparse((uri.scheme, uri.netloc, '/preload.php', None, query, None)), token)
+
+ def _build_stream_url(self, meta):
+ return compat_urlparse.urlunparse(('http', meta['streamKey']['ip'], '/stream.php', None, None, None))
+
+ def _build_swf_referer(self, target, obj):
+ (uri, _, _) = self._parse_target(target)
+ return compat_urlparse.urlunparse((uri.scheme, uri.netloc, obj['attrs']['data'], None, None, None))
+
+ def _transform_bootstrap(self, js):
+        return re.split(r'(?m)^\s*try\s*\{', js)[0] \
+            .split(' = ', 1)[1].strip().rstrip(';')
+
+ def _transform_meta(self, js):
+ return js.split('\n')[0].split('=')[1].rstrip(';')
+
+ def _get_meta(self, target):
+ (meta_url, token) = self._build_meta_url(target)
+ self.to_screen('Metadata URL: %s' % meta_url)
+
+ headers = {'Referer': compat_urlparse.urldefrag(target)[0]}
+ req = compat_urllib_request.Request(meta_url, headers=headers)
+ res = self._download_json(req, token,
+ transform_source=self._transform_meta)
+
+ if 'getStreamKeyWithSong' not in res:
+ raise ExtractorError(
+ 'Metadata not found. URL may be malformed, or Grooveshark API may have changed.')
+
+ if res['getStreamKeyWithSong'] is None:
+ raise ExtractorError(
+ 'Metadata download failed, probably due to Grooveshark anti-abuse throttling. Wait at least an hour before retrying from this IP.',
+ expected=True)
+
+ return res['getStreamKeyWithSong']
+
+ def _get_bootstrap(self, target):
+ (bootstrap_url, token) = self._build_bootstrap_url(target)
+
+ headers = {'Referer': compat_urlparse.urldefrag(target)[0]}
+ req = compat_urllib_request.Request(bootstrap_url, headers=headers)
+ res = self._download_json(req, token, fatal=False,
+ note='Downloading player bootstrap data',
+ errnote='Unable to download player bootstrap data',
+ transform_source=self._transform_bootstrap)
+ return res
+
+ def _get_playerpage(self, target):
+ (_, _, token) = self._parse_target(target)
+
+ webpage = self._download_webpage(
+ target, token,
+ note='Downloading player page',
+ errnote='Unable to download player page',
+ fatal=False)
+
+        if webpage is not None:
+            # Search for an error message (for example a German one)
+            error_msg = self._html_search_regex(
+                r'<div id="content">\s*<h2>(.*?)</h2>', webpage,
+                'error message', default=None)
+            if error_msg is not None:
+                error_msg = error_msg.replace('\n', ' ')
+                raise ExtractorError('Grooveshark said: %s' % error_msg)
+
+            o = GroovesharkHtmlParser.extract_object_tags(webpage)
+            return (webpage, [x for x in o if x['attrs']['id'] == 'jsPlayerEmbed'])
+
+        return (webpage, None)
+
+ def _real_initialize(self):
+ self.ts = int(time.time() * 1000) # timestamp in millis
+
+ def _real_extract(self, url):
+ (target_uri, _, token) = self._parse_target(url)
+
+ # 1. Fill cookiejar by making a request to the player page
+ swf_referer = None
+ if self.do_playerpage_request:
+ (_, player_objs) = self._get_playerpage(url)
+ if player_objs is not None:
+ swf_referer = self._build_swf_referer(url, player_objs[0])
+ self.to_screen('SWF Referer: %s' % swf_referer)
+
+ # 2. Ask preload.php for swf bootstrap data to better mimic webapp
+ if self.do_bootstrap_request:
+ bootstrap = self._get_bootstrap(url)
+ self.to_screen('CommunicationToken: %s' % bootstrap['getCommunicationToken'])
+
+ # 3. Ask preload.php for track metadata.
+ meta = self._get_meta(url)
+
+ # 4. Construct stream request for track.
+ stream_url = self._build_stream_url(meta)
+ duration = int(math.ceil(float(meta['streamKey']['uSecs']) / 1000000))
+ post_dict = {'streamKey': meta['streamKey']['streamKey']}
+ post_data = compat_urllib_parse.urlencode(post_dict).encode('utf-8')
+ headers = {
+ 'Content-Length': len(post_data),
+ 'Content-Type': 'application/x-www-form-urlencoded'
+ }
+ if swf_referer is not None:
+ headers['Referer'] = swf_referer
+
+ return {
+ 'id': token,
+ 'title': meta['song']['Name'],
+ 'http_method': 'POST',
+ 'url': stream_url,
+ 'ext': 'mp3',
+ 'format': 'mp3 audio',
+ 'duration': duration,
+ 'http_post_data': post_data,
+ 'http_headers': headers,
+ }
# -*- coding: utf-8 -*-
-
-import re
-import json
+from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import determine_ext
+
class HarkIE(InfoExtractor):
- _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
+ _VALID_URL = r'https?://www\.hark\.com/clips/(?P<id>.+?)-.+'
_TEST = {
- u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
- u'file': u'mmbzyhkgny.mp3',
- u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
- u'info_dict': {
- u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
- u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
- u'duration': 11,
+ 'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
+ 'md5': '6783a58491b47b92c7c1af5a77d4cbee',
+ 'info_dict': {
+ 'id': 'mmbzyhkgny',
+ 'ext': 'mp3',
+ 'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013',
+ 'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
+ 'duration': 11,
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
- json_url = "http://www.hark.com/clips/%s.json" %(video_id)
- info_json = self._download_webpage(json_url, video_id)
- info = json.loads(info_json)
- final_url = info['url']
+ video_id = self._match_id(url)
+ data = self._download_json(
+ 'http://www.hark.com/clips/%s.json' % video_id, video_id)
- return {'id': video_id,
- 'url' : final_url,
- 'title': info['name'],
- 'ext': determine_ext(final_url),
- 'description': info['description'],
- 'thumbnail': info['image_original'],
- 'duration': info['duration'],
- }
+ return {
+ 'id': video_id,
+ 'url': data['url'],
+ 'title': data['name'],
+ 'description': data.get('description'),
+ 'thumbnail': data.get('image_original'),
+ 'duration': data.get('duration'),
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ get_meta_content,
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class HeiseIE(InfoExtractor):
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?heise\.de/video/artikel/
+ .+?(?P<id>[0-9]+)\.html(?:$|[?#])
+ '''
+ _TEST = {
+ 'url': (
+ 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
+ ),
+ 'md5': 'ffed432483e922e88545ad9f2f15d30e',
+ 'info_dict': {
+ 'id': '2404147',
+ 'ext': 'mp4',
+ 'title': (
+ "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
+ ),
+ 'format_id': 'mp4_720',
+ 'timestamp': 1411812600,
+ 'upload_date': '20140927',
+ 'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
+ 'thumbnail': 're:https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ container_id = self._search_regex(
+ r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
+ webpage, 'container ID')
+ sequenz_id = self._search_regex(
+ r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
+ webpage, 'sequenz ID')
+ data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
+ doc = self._download_xml(data_url, video_id)
+
+ info = {
+ 'id': video_id,
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'timestamp': parse_iso8601(get_meta_content('date', webpage)),
+ 'description': self._og_search_description(webpage),
+ }
+
+ title = get_meta_content('fulltitle', webpage)
+ if title:
+ info['title'] = title
+ else:
+ info['title'] = self._og_search_title(webpage)
+
+ formats = []
+ for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
+ label = source_node.attrib['label']
+ height = int_or_none(self._search_regex(
+ r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
+ formats.append({
+ 'url': source_node.attrib['file'],
+ 'format_note': label,
+ 'height': height,
+ })
+ self._sort_formats(formats)
+ info['formats'] = formats
+
+ return info
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_duration,
+)
+
+
+class HornBunnyIE(InfoExtractor):
+ _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
+ _TEST = {
+ 'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
+ 'md5': '95e40865aedd08eff60272b704852ad7',
+ 'info_dict': {
+ 'id': '5227',
+ 'ext': 'flv',
+ 'title': 'panty slut jerk off instruction',
+ 'duration': 550,
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(
+ url, video_id, note='Downloading initial webpage')
+ title = self._html_search_regex(
+ r'class="title">(.*?)</h2>', webpage, 'title')
+        redirect_url = self._html_search_regex(
+            r'pg&settings=(.*?)\|0"\);', webpage, 'redirect url')
+ webpage2 = self._download_webpage(redirect_url, video_id)
+ video_url = self._html_search_regex(
+ r'flvMask:(.*?);', webpage2, 'video_url')
+
+ duration = parse_duration(self._search_regex(
+ r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
+ webpage, 'duration', fatal=False))
+ view_count = int_or_none(self._search_regex(
+ r'<strong>Views:</strong>\s*(\d+)</div>',
+ webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'ext': 'flv',
+ 'duration': duration,
+ 'view_count': view_count,
+ 'age_limit': 18,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_urllib_request,
+ int_or_none,
+ urlencode_postdata,
+)
+
+
+class HostingBulkIE(InfoExtractor):
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?hostingbulk\.com/
+ (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
+ _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
+ _TEST = {
+ 'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
+ 'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
+ 'info_dict': {
+ 'id': 'n0ulw1hv20fm',
+ 'ext': 'mp4',
+ 'title': 'md5:5afeba33f48ec87219c269e054afd622',
+ 'filesize': 6816081,
+ 'thumbnail': 're:^http://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+        url = 'http://hostingbulk.com/{0}.html'.format(video_id)
+
+        # Custom request with a cookie that sets the language to English, so
+        # our file-deleted regex works.
+ request = compat_urllib_request.Request(
+ url, headers={'Cookie': 'lang=english'})
+ webpage = self._download_webpage(request, video_id)
+
+ if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+ raise ExtractorError('Video %s does not exist' % video_id,
+ expected=True)
+
+ title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
+        filesize = int_or_none(self._search_regex(
+            r'<small>\((\d+)\sbytes?\)</small>', webpage, 'filesize',
+            fatal=False))
+ thumbnail = self._search_regex(
+ r'<img src="([^"]+)".+?class="pic"',
+ webpage, 'thumbnail', fatal=False)
+
+ fields = dict(re.findall(r'''(?x)<input\s+
+ type="hidden"\s+
+ name="([^"]+)"\s+
+ value="([^"]*)"
+ ''', webpage))
+
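+        # Re-submit the page's hidden form fields; the server then redirects
+        # to the actual video file.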
+ request = compat_urllib_request.Request(url, urlencode_postdata(fields))
+ request.add_header('Content-type', 'application/x-www-form-urlencoded')
+ response = self._request_webpage(request, video_id,
+                                        'Submitting download request')
+ video_url = response.geturl()
+
+ formats = [{
+ 'format_id': 'sd',
+ 'filesize': filesize,
+ 'url': video_url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+import json
+import random
+import string
+
+from .common import InfoExtractor
+from ..utils import find_xpath_attr
+
+
+class HowStuffWorksIE(InfoExtractor):
+ _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*\d+-(?P<id>.+?)-video\.htm'
+ _TESTS = [
+ {
+ 'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
+ 'info_dict': {
+ 'id': '450221',
+ 'display_id': 'cool-jobs-iditarod-musher',
+ 'ext': 'flv',
+ 'title': 'Cool Jobs - Iditarod Musher',
+ 'description': 'md5:82bb58438a88027b8186a1fccb365f90',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ # md5 is not consistent
+ 'skip_download': True
+ }
+ },
+ {
+ 'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
+ 'info_dict': {
+ 'id': '453464',
+ 'display_id': 'survival-zone-food-and-water-in-the-savanna',
+ 'ext': 'mp4',
+ 'title': 'Survival Zone: Food and Water In the Savanna',
+ 'description': 'md5:7e1c89f6411434970c15fa094170c371',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ # md5 is not consistent
+ 'skip_download': True
+ }
+ },
+ {
+ 'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
+ 'info_dict': {
+ 'id': '440011',
+ 'display_id': 'sword-swallowing-1-by-dan-meyer',
+ 'ext': 'flv',
+ 'title': 'Sword Swallowing #1 by Dan Meyer',
+ 'description': 'md5:b2409e88172913e2e7d3d1159b0ef735',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ # md5 is not consistent
+ 'skip_download': True
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('id')
+ webpage = self._download_webpage(url, display_id)
+
+ content_id = self._search_regex(r'var siteSectionId="(\d+)";', webpage, 'content id')
+
+ mp4 = self._search_regex(
+ r'''(?xs)var\s+clip\s*=\s*{\s*
+ .+?\s*
+ content_id\s*:\s*%s\s*,\s*
+ .+?\s*
+ mp4\s*:\s*\[(.*?),?\]\s*
+ };\s*
+ videoData\.push\(clip\);''' % content_id,
+ webpage, 'mp4', fatal=False, default=None)
+
+ smil = self._download_xml(
+ 'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % content_id,
+ content_id, 'Downloading video SMIL')
+
+ http_base = find_xpath_attr(
+ smil,
+ './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
+ 'name',
+ 'httpBase').get('content')
+
+ def random_string(str_len=0):
+ return ''.join([random.choice(string.ascii_uppercase) for _ in range(str_len)])
+
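+        # Query string apparently expected by the Flash player: version,
+        # platform and two random tokens.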
+ URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=%s&g=%s' % (random_string(5), random_string(12))
+
+ formats = []
+
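+        # Prefer the inline mp4 clip list when present; otherwise fall back
+        # to the SMIL manifest.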
+ if mp4:
+ for video in json.loads('[%s]' % mp4):
+ bitrate = video['bitrate']
+ fmt = {
+ 'url': video['src'].replace('http://pmd.video.howstuffworks.com', http_base) + URL_SUFFIX,
+ 'format_id': bitrate,
+ }
+ m = re.search(r'(?P<vbr>\d+)[Kk]', bitrate)
+ if m:
+ fmt['vbr'] = int(m.group('vbr'))
+ formats.append(fmt)
+ else:
+ for video in smil.findall(
+ './/{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
+ vbr = int(video.attrib['system-bitrate']) / 1000
+ formats.append({
+ 'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
+ 'format_id': '%dk' % vbr,
+ 'vbr': vbr,
+ })
+
+ self._sort_formats(formats)
+
+ title = self._og_search_title(webpage)
+ TITLE_SUFFIX = ' : HowStuffWorks'
+ if title.endswith(TITLE_SUFFIX):
+ title = title[:-len(TITLE_SUFFIX)]
+
+ description = self._og_search_description(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ return {
+ 'id': content_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
data = self._download_json(api_url, video_id)['data']
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- html_title = self._html_search_regex(
- r'<title>(.+?)</title>',
+ title = self._html_search_regex(
+ r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
webpage, 'title')
- title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
uploader_id = self._html_search_regex(
r'@([^ ]+)', title, 'uploader name', fatal=False)
_DESCRIPTION_RE = [
r'<span class="page-object-description">(.+?)</span>',
r'id="my_show_video">.*?<p>(.*?)</p>',
+ r'<meta name="description" content="(.*?)"',
]
_TESTS = [
'skip_download': True,
},
},
+ {
+ 'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
+ 'md5': '4e9a0bda1e5eebd31ddcf86ec0b9b3c7',
+ 'info_dict': {
+ 'id': '078fdd005f6d3c02f63d795faa1b984f',
+ 'ext': 'mp4',
+ 'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
+ 'description': 'Giant skeletons, bloody hunts, and captivating'
+ ' natural beauty take our breath away.',
+ },
+ },
]
def _find_video_id(self, webpage):
res_id = [
+ r'"video_id"\s*:\s*"(.*?)"',
r'data-video-id="(.+?)"',
r'<object id="vid_(.+?)"',
r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
+ r'class="hero-poster[^"]*?"[^>]*id="(.+?)"',
]
return self._search_regex(res_id, webpage, 'video id')
name_or_id = mobj.group('name_or_id')
page_type = mobj.group('type')
webpage = self._download_webpage(url, name_or_id)
- if page_type == 'articles':
- video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
- return self.url_result(video_url, ie='IGN')
- elif page_type != 'video':
+ if page_type != 'video':
multiple_urls = re.findall(
- '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
+ '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
- return [self.url_result(u, ie='IGN') for u in multiple_urls]
+ entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
+ return {
+ '_type': 'playlist',
+ 'id': name_or_id,
+ 'entries': entries,
+ }
video_id = self._find_video_id(webpage)
result = self._get_video_info(video_id)
class OneUPIE(IGNIE):
- _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
+ _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
IE_NAME = '1up.com'
_DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
_TESTS = [{
- 'url': 'http://gamevideos.1up.com/video/id/34976',
+ 'url': 'http://gamevideos.1up.com/video/id/34976.html',
'md5': '68a54ce4ebc772e4b71e3123d413163d',
'info_dict': {
'id': '34976',
IE_NAME = 'imdb:list'
IE_DESC = 'Internet Movie Database lists'
_VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
+ _TEST = {
+ 'url': 'http://www.imdb.com/list/JFs9NWw6XI0',
+ 'info_dict': {
+ 'id': 'JFs9NWw6XI0',
+ 'title': 'March 23, 2012 Releases',
+ },
+ 'playlist_count': 7,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
_VALID_URL = r'http://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
+ _TEST = {
+ 'url': 'http://instagram.com/porsche',
+ 'info_dict': {
+ 'id': 'porsche',
+ 'title': 'porsche',
+ },
+ 'playlist_mincount': 2,
+ 'playlist': [{
+ 'info_dict': {
+ 'id': '614605558512799803_462752227',
+ 'ext': 'mp4',
+ 'title': '#Porsche Intelligent Performance.',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Porsche',
+ 'uploader_id': 'porsche',
+ 'timestamp': 1387486713,
+ 'upload_date': '20131219',
+ },
+ }],
+ 'params': {
+ 'extract_flat': True,
+ 'skip_download': True,
+ }
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
_VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
_TEST = {
- u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
- u'file': u'452693.mp4',
- u'info_dict': {
- u'title': u'SKYFALL',
- u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
- u'duration': 153,
+ 'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
+ 'info_dict': {
+ 'id': '452693',
+ 'ext': 'mp4',
+ 'title': 'SKYFALL',
+ 'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
+ 'duration': 149,
},
}
url = self._build_url(query)
flashconfiguration = self._download_xml(url, video_id,
- u'Downloading flash configuration')
+ 'Downloading flash configuration')
file_url = flashconfiguration.find('file').text
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
# Replace some of the parameters in the query to get the best quality
lambda m: self._clean_query(m.group()),
file_url)
info = self._download_xml(file_url, video_id,
- u'Downloading video info')
+ 'Downloading video info')
item = info.find('channel/item')
def _bp(p):
IE_DESC = 'ivi.ru compilations'
IE_NAME = 'ivi:compilation'
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
+ _TESTS = [{
+ 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
+ 'info_dict': {
+ 'id': 'dvoe_iz_lartsa',
+ 'title': 'Двое из ларца (2006 - 2008)',
+ },
+ 'playlist_mincount': 24,
+ }, {
+ 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
+ 'info_dict': {
+ 'id': 'dvoe_iz_lartsa/season1',
+ 'title': 'Двое из ларца (2006 - 2008) 1 сезон',
+ },
+ 'playlist_mincount': 12,
+ }]
def _extract_entries(self, html, compilation_id):
return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
parse_iso8601,
determine_ext,
int_or_none,
+ float_or_none,
str_to_int,
)
class IzleseneIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?:www|m)\.)?izlesene\.com/(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)'
- _STREAM_URL = 'http://panel.izlesene.com/api/streamurl/{id:}/{format:}'
- _TEST = {
- 'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
- 'md5': '4384f9f0ea65086734b881085ee05ac2',
- 'info_dict': {
- 'id': '7599694',
- 'ext': 'mp4',
- 'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
- 'description': 'Annesi oğluna doğum günü hediyesi olarak minecraft cd si alıyor, ve çocuk hunharca seviniyor',
- 'thumbnail': 're:^http://.*\.jpg',
- 'uploader_id': 'pelikzzle',
- 'timestamp': 1404298698,
- 'upload_date': '20140702',
- 'duration': 95.395,
- 'age_limit': 0,
- }
- }
+ _VALID_URL = r'''(?x)
+ https?://(?:(?:www|m)\.)?izlesene\.com/
+ (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
+ '''
+ _TESTS = [
+ {
+ 'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
+ 'md5': '4384f9f0ea65086734b881085ee05ac2',
+ 'info_dict': {
+ 'id': '7599694',
+ 'ext': 'mp4',
+ 'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
+ 'description': 'md5:253753e2655dde93f59f74b572454f6d',
+ 'thumbnail': 're:^http://.*\.jpg',
+ 'uploader_id': 'pelikzzle',
+ 'timestamp': 1404298698,
+ 'upload_date': '20140702',
+ 'duration': 95.395,
+ 'age_limit': 0,
+ }
+ },
+ {
+ 'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
+ 'md5': '97f09b6872bffa284cb7fa4f6910cb72',
+ 'info_dict': {
+ 'id': '17997',
+ 'ext': 'mp4',
+ 'title': 'Tarkan Dortmund 2006 Konseri',
+ 'description': 'Tarkan Dortmund 2006 Konseri',
+ 'thumbnail': 're:^http://.*\.jpg',
+ 'uploader_id': 'parlayankiz',
+ 'timestamp': 1163318593,
+ 'upload_date': '20061112',
+ 'duration': 253.666,
+ 'age_limit': 0,
+ }
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
+ thumbnail = self._proto_relative_url(
+ self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
- r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False, default='')
+ r"adduserUsername\s*=\s*'([^']+)';",
+ webpage, 'uploader', fatal=False, default='')
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date', fatal=False))
- duration = int_or_none(self._html_search_regex(
- r'"videoduration"\s*:\s*"([^"]+)"', webpage, 'duration', fatal=False))
- if duration:
- duration /= 1000.0
+ duration = float_or_none(self._html_search_regex(
+ r'"videoduration"\s*:\s*"([^"]+)"',
+ webpage, 'duration', fatal=False), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
- r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'uploader', fatal=False)
+ r'comment_count\s*=\s*\'([^\']+)\';',
+ webpage, 'comment_count', fatal=False)
family_friendly = self._html_search_meta(
'isFamilyFriendly', webpage, 'age limit', fatal=False)
ext = determine_ext(content_url, 'mp4')
# Might be empty for some videos.
- qualities = self._html_search_regex(
- r'"quality"\s*:\s*"([^"]+)"', webpage, 'qualities', fatal=False, default='')
+ streams = self._html_search_regex(
+ r'"qualitylevel"\s*:\s*"([^"]+)"',
+ webpage, 'streams', fatal=False, default='')
formats = []
- for quality in qualities.split('|'):
- json = self._download_json(
- self._STREAM_URL.format(id=video_id, format=quality), video_id,
- note='Getting video URL for "%s" quality' % quality,
- errnote='Failed to get video URL for "%s" quality' % quality
- )
+ if streams:
+ for stream in streams.split('|'):
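+            # each entry looks like "[360]http://...": bracketed quality followed by the URL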
+ quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
+ formats.append({
+ 'format_id': '%sp' % quality if quality else 'sd',
+ 'url': url,
+ 'ext': ext,
+ })
+ else:
+ stream_url = self._search_regex(
+ r'"streamurl"\s?:\s?"([^"]+)"', webpage, 'stream URL')
formats.append({
- 'url': json.get('streamurl'),
+ 'format_id': 'sd',
+ 'url': stream_url,
'ext': ext,
- 'format_id': '%sp' % quality if quality else 'sd',
})
return {
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ unified_strdate
+)
+
+
+class JoveIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?jove\.com/video/(?P<id>[0-9]+)'
+ _CHAPTERS_URL = 'http://www.jove.com/video-chapters?videoid={video_id:}'
+ _TESTS = [
+ {
+ 'url': 'http://www.jove.com/video/2744/electrode-positioning-montage-transcranial-direct-current',
+ 'md5': '93723888d82dbd6ba8b3d7d0cd65dd2b',
+ 'info_dict': {
+ 'id': '2744',
+ 'ext': 'mp4',
+ 'title': 'Electrode Positioning and Montage in Transcranial Direct Current Stimulation',
+ 'description': 'md5:015dd4509649c0908bc27f049e0262c6',
+ 'thumbnail': 're:^https?://.*\.png$',
+ 'upload_date': '20110523',
+ }
+ },
+ {
+ 'url': 'http://www.jove.com/video/51796/culturing-caenorhabditis-elegans-axenic-liquid-media-creation',
+ 'md5': '914aeb356f416811d911996434811beb',
+ 'info_dict': {
+ 'id': '51796',
+ 'ext': 'mp4',
+ 'title': 'Culturing Caenorhabditis elegans in Axenic Liquid Media and Creation of Transgenic Worms by Microparticle Bombardment',
+ 'description': 'md5:35ff029261900583970c4023b70f1dc9',
+ 'thumbnail': 're:^https?://.*\.png$',
+ 'upload_date': '20140802',
+ }
+ },
+
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ chapters_id = self._html_search_regex(
+ r'/video-chapters\?videoid=([0-9]+)', webpage, 'chapters id')
+
+ chapters_xml = self._download_xml(
+ self._CHAPTERS_URL.format(video_id=chapters_id),
+ video_id, note='Downloading chapters XML',
+ errnote='Failed to download chapters XML')
+
+ video_url = chapters_xml.attrib.get('video')
+ if not video_url:
+ raise ExtractorError('Failed to get the video URL')
+
+ title = self._html_search_meta('citation_title', webpage, 'title')
+ thumbnail = self._og_search_thumbnail(webpage)
+ description = self._html_search_regex(
+ r'<div id="section_body_summary"><p class="jove_content">(.+?)</p>',
+ webpage, 'description', fatal=False)
+ publish_date = unified_strdate(self._html_search_meta(
+ 'citation_publication_date', webpage, 'publish date', fatal=False))
+ comment_count = self._html_search_regex(
+ r'<meta name="num_comments" content="(\d+) Comments?"',
+ webpage, 'comment count', fatal=False)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'upload_date': publish_date,
+ 'comment_count': comment_count,
+ }
# coding=utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
int_or_none,
class JpopsukiIE(InfoExtractor):
IE_NAME = 'jpopsuki.tv'
- _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
+ _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
_TEST = {
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
- 'file': '00be659d23b0b40508169cdee4545771.mp4',
'info_dict': {
'id': '00be659d23b0b40508169cdee4545771',
+ 'ext': 'mp4',
'title': 'ayumi hamasaki - evolution',
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
uploader_id = self._html_search_regex(
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
webpage, 'video uploader_id', fatal=False)
- upload_date = self._html_search_regex(
+ upload_date = unified_strdate(self._html_search_regex(
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
- fatal=False)
- if upload_date is not None:
- upload_date = unified_strdate(upload_date)
+ fatal=False))
view_count_str = self._html_search_regex(
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
fatal=False)
class JukeboxIE(InfoExtractor):
- _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
+ _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
_TEST = {
'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
- 'md5': '1574e9b4d6438446d5b7dbcdf2786276',
'info_dict': {
'id': 'r303r',
'ext': 'flv',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('video_id')
+ video_id = self._match_id(url)
html = self._download_webpage(url, video_id)
iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
+++ /dev/null
-from __future__ import unicode_literals
-
-import itertools
-import json
-import os
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- compat_str,
- ExtractorError,
- formatSeconds,
-)
-
-
-class JustinTVIE(InfoExtractor):
- """Information extractor for justin.tv and twitch.tv"""
- # TODO: One broadcast may be split into multiple videos. The key
- # 'broadcast_id' is the same for all parts, and 'broadcast_part'
- # starts at 1 and increases. Can we treat all parts as one video?
-
- _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
- (?:
- (?P<channelid>[^/]+)|
- (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
- (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
- )
- /?(?:\#.*)?$
- """
- _JUSTIN_PAGE_LIMIT = 100
- IE_NAME = 'justin.tv'
- IE_DESC = 'justin.tv and twitch.tv'
- _TEST = {
- 'url': 'http://www.twitch.tv/thegamedevhub/b/296128360',
- 'md5': 'ecaa8a790c22a40770901460af191c9a',
- 'info_dict': {
- 'id': '296128360',
- 'ext': 'flv',
- 'upload_date': '20110927',
- 'uploader_id': 25114803,
- 'uploader': 'thegamedevhub',
- 'title': 'Beginner Series - Scripting With Python Pt.1'
- }
- }
-
- # Return count of items, list of *valid* items
- def _parse_page(self, url, video_id, counter):
- info_json = self._download_webpage(
- url, video_id,
- 'Downloading video info JSON on page %d' % counter,
- 'Unable to download video info JSON %d' % counter)
-
- response = json.loads(info_json)
- if type(response) != list:
- error_text = response.get('error', 'unknown error')
- raise ExtractorError('Justin.tv API: %s' % error_text)
- info = []
- for clip in response:
- video_url = clip['video_file_url']
- if video_url:
- video_extension = os.path.splitext(video_url)[1][1:]
- video_date = re.sub('-', '', clip['start_time'][:10])
- video_uploader_id = clip.get('user_id', clip.get('channel_id'))
- video_id = clip['id']
- video_title = clip.get('title', video_id)
- info.append({
- 'id': compat_str(video_id),
- 'url': video_url,
- 'title': video_title,
- 'uploader': clip.get('channel_name', video_uploader_id),
- 'uploader_id': video_uploader_id,
- 'upload_date': video_date,
- 'ext': video_extension,
- })
- return (len(response), info)
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
-
- api_base = 'http://api.justin.tv'
- paged = False
- if mobj.group('channelid'):
- paged = True
- video_id = mobj.group('channelid')
- api = api_base + '/channel/archives/%s.json' % video_id
- elif mobj.group('chapterid'):
- chapter_id = mobj.group('chapterid')
-
- webpage = self._download_webpage(url, chapter_id)
- m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
- if not m:
- raise ExtractorError('Cannot find archive of a chapter')
- archive_id = m.group(1)
-
- api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
- doc = self._download_xml(
- api, chapter_id,
- note='Downloading chapter information',
- errnote='Chapter information download failed')
- for a in doc.findall('.//archive'):
- if archive_id == a.find('./id').text:
- break
- else:
- raise ExtractorError('Could not find chapter in chapter information')
-
- video_url = a.find('./video_file_url').text
- video_ext = video_url.rpartition('.')[2] or 'flv'
-
- chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
- chapter_info = self._download_json(
- chapter_api_url, 'c' + chapter_id,
- note='Downloading chapter metadata',
- errnote='Download of chapter metadata failed')
-
- bracket_start = int(doc.find('.//bracket_start').text)
- bracket_end = int(doc.find('.//bracket_end').text)
-
- # TODO determine start (and probably fix up file)
- # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
- #video_url += '?start=' + TODO:start_timestamp
- # bracket_start is 13290, but we want 51670615
- self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
- 'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
-
- info = {
- 'id': 'c' + chapter_id,
- 'url': video_url,
- 'ext': video_ext,
- 'title': chapter_info['title'],
- 'thumbnail': chapter_info['preview'],
- 'description': chapter_info['description'],
- 'uploader': chapter_info['channel']['display_name'],
- 'uploader_id': chapter_info['channel']['name'],
- }
- return info
- else:
- video_id = mobj.group('videoid')
- api = api_base + '/broadcast/by_archive/%s.json' % video_id
-
- entries = []
- offset = 0
- limit = self._JUSTIN_PAGE_LIMIT
- for counter in itertools.count(1):
- page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
- page_count, page_info = self._parse_page(
- page_url, video_id, counter)
- entries.extend(page_info)
- if not paged or page_count != limit:
- break
- offset += limit
- return {
- '_type': 'playlist',
- 'id': video_id,
- 'entries': entries,
- }
class KhanAcademyIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
+ _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
IE_NAME = 'KhanAcademy'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.khanacademy.org/video/one-time-pad',
- 'file': 'one-time-pad.mp4',
'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
'info_dict': {
+ 'id': 'one-time-pad',
+ 'ext': 'mp4',
'title': 'The one-time pad',
'description': 'The perfect cipher',
'duration': 176,
'uploader': 'Brit Cruise',
'upload_date': '20120411',
}
- }
+ }, {
+ 'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
+ 'info_dict': {
+ 'id': 'cryptography',
+ 'title': 'Journey into cryptography',
+ 'description': 'How have humans protected their secret messages through history? What has changed today?',
+ },
+ 'playlist_mincount': 3,
+ }]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
# encoding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
- 'playlist': [
- {
- 'info_dict': {
- 'id': '78704821',
- 'ext': 'mp4',
- 'uploader_id': 'pebble',
- 'uploader': 'Pebble Technology',
- 'title': 'Pebble iOS Notifications',
- }
- }
- ],
+ 'info_dict': {
+ 'id': '78704821',
+ 'ext': 'mp4',
+ 'uploader_id': 'pebble',
+ 'uploader': 'Pebble Technology',
+ 'title': 'Pebble iOS Notifications',
+ }
}]
def _real_extract(self, url):
- m = re.match(self._VALID_URL, url)
- video_id = m.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
title = self._html_search_regex(
- r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
+ r'<title>(.+?)</title>', webpage, 'video title')
description = self._html_search_meta('description', webpage, 'video description')
mobj = re.search(
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
+
+ title = self._html_search_regex(
+ r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
jsonData = self._download_json(dataUrl, video_id)
downloadUrl = jsonData['data']['f']
--- /dev/null
+from __future__ import unicode_literals
+
+import random
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Laola1TvIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
+ _TEST = {
+ 'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
+ 'info_dict': {
+ 'id': '250019',
+ 'ext': 'mp4',
+ 'title': 'Bitburger Open Grand Prix Gold - Court 1',
+ 'categories': ['Badminton'],
+ 'uploader': 'BWF - Badminton World Federation',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }
+
+ _BROKEN = True # Not really - extractor works fine, but f4m downloader does not support live streams yet.
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ lang = mobj.group('lang')
+ portal = mobj.group('portal')
+
+ webpage = self._download_webpage(url, video_id)
+ iframe_url = self._search_regex(
+ r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
+ webpage, 'iframe URL')
+
+ iframe = self._download_webpage(
+ iframe_url, video_id, note='Downloading iframe')
+ flashvars_m = re.findall(
+ r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
+ flashvars = dict((m[0], m[1]) for m in flashvars_m)
+
+ xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
+ 'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
+ video_id, portal, lang))
+ hd_doc = self._download_xml(xml_url, video_id)
+
+ title = hd_doc.find('.//video/title').text
+ flash_url = hd_doc.find('.//video/url').text
+ categories = hd_doc.find('.//video/meta_sports').text.split(',')
+ uploader = hd_doc.find('.//video/meta_organistation').text
+
+ ident = random.randint(10000000, 99999999)
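+        # Request a stream token, authenticated with the timestamp/auth pair
+        # scraped from the player flashvars.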
+        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
+            flash_url, ident, flashvars['timestamp'], flashvars['auth'])
+
+ token_doc = self._download_xml(
+ token_url, video_id, note='Downloading token')
+ token_attrib = token_doc.find('.//token').attrib
+ if token_attrib.get('auth') == 'blocked':
+            raise ExtractorError('Token error: %s' % token_attrib.get('comment'))
+
+ video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
+ token_attrib['url'], token_attrib['auth'])
+
+ return {
+ 'id': video_id,
+ 'is_live': True,
+ 'title': title,
+ 'url': video_url,
+ 'uploader': uploader,
+ 'categories': categories,
+ 'ext': 'mp4',
+ }
+
class LivestreamIE(InfoExtractor):
IE_NAME = 'livestream'
_VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
- _TEST = {
+ _TESTS = [{
'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
'md5': '53274c76ba7754fb0e8d072716f2292b',
'info_dict': {
'view_count': int,
'thumbnail': 're:^http://.*\.jpg$'
}
- }
+ }, {
+ 'url': 'http://new.livestream.com/tedx/cityenglish',
+ 'info_dict': {
+ 'title': 'TEDCity2.0 (English)',
+ },
+ 'playlist_mincount': 4,
+ }]
def _parse_smil(self, video_id, smil_url):
formats = []
event_name = mobj.group('event_name')
webpage = self._download_webpage(url, video_id or event_name)
+ og_video = self._og_search_video_url(
+ webpage, 'player url', fatal=False, default=None)
+ if og_video is not None:
+ query_str = compat_urllib_parse_urlparse(og_video).query
+ query = compat_urlparse.parse_qs(query_str)
+ if 'play_url' in query:
+ api_url = query['play_url'][0].replace('.smil', '')
+ info = json.loads(self._download_webpage(
+ api_url, video_id, 'Downloading video info'))
+ return self._extract_video_info(info)
+
+ config_json = self._search_regex(
+ r'window.config = ({.*?});', webpage, 'window config')
+ info = json.loads(config_json)['event']
+
+ def is_relevant(vdata, vid):
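+            # a feed entry counts if it is a video and, when a single video
+            # was requested, matches that id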
+ result = vdata['type'] == 'video'
+ if video_id is not None:
+ result = result and compat_str(vdata['data']['id']) == vid
+ return result
+
+ videos = [self._extract_video_info(video_data['data'])
+ for video_data in info['feed']['data']
+ if is_relevant(video_data, video_id)]
if video_id is None:
# This is an event page:
- config_json = self._search_regex(
- r'window.config = ({.*?});', webpage, 'window config')
- info = json.loads(config_json)['event']
- videos = [self._extract_video_info(video_data['data'])
- for video_data in info['feed']['data']
- if video_data['type'] == 'video']
return self.playlist_result(videos, info['id'], info['full_name'])
else:
- og_video = self._og_search_video_url(webpage, 'player url')
- query_str = compat_urllib_parse_urlparse(og_video).query
- query = compat_urlparse.parse_qs(query_str)
- api_url = query['play_url'][0].replace('.smil', '')
- info = json.loads(self._download_webpage(
- api_url, video_id, 'Downloading video info'))
- return self._extract_video_info(info)
+ if not videos:
+ raise ExtractorError('Cannot find video %s' % video_id)
+ return videos[0]
# The original version of Livestream uses a different system
(?P<user>[^/]+)/(?P<type>video|folder)
(?:\?.*?Id=|/)(?P<id>.*?)(&|$)
'''
- _TEST = {
+ _TESTS = [{
'url': 'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'info_dict': {
'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
# rtmp
'skip_download': True,
},
- }
+ }, {
+ 'url': 'https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+ 'info_dict': {
+ 'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
+ },
+ 'playlist_mincount': 4,
+ }]
def _extract_video(self, user, video_id):
api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
'id': video_id,
'title': item.find('title').text,
'url': 'rtmp://extondemand.livestream.com/ondemand',
- 'play_path': 'mp4:trans/dv15/mogulus-{0}.mp4'.format(path),
+ 'play_path': 'trans/dv15/mogulus-{0}'.format(path),
+ 'player_url': 'http://static.livestream.com/chromelessPlayer/v21/playerapi.swf?hash=5uetk&v=0803&classid=D27CDB6E-AE6D-11cf-96B8-444553540000&jsEnabled=false&wmode=opaque',
'ext': 'flv',
'thumbnail': thumbnail_url,
}
def _extract_folder(self, url, folder_id):
webpage = self._download_webpage(url, folder_id)
- urls = orderedSet(re.findall(r'<a href="(https?://livestre\.am/.*?)"', webpage))
+ paths = orderedSet(re.findall(
+ r'''(?x)(?:
+ <li\s+class="folder">\s*<a\s+href="|
+ <a\s+href="(?=https?://livestre\.am/)
+ )([^"]+)"''', webpage))
return {
'_type': 'playlist',
'id': folder_id,
'entries': [{
'_type': 'url',
- 'url': video_url,
- } for video_url in urls],
+ 'url': compat_urlparse.urljoin(url, p),
+ } for p in paths],
}
def _real_extract(self, url):
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ js_to_json,
+ parse_duration,
+ remove_end,
+)
+
+
+class LRTIE(InfoExtractor):
+ IE_NAME = 'lrt.lt'
+ _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
+ 'info_dict': {
+ 'id': '54391',
+ 'ext': 'mp4',
+ 'title': 'Septynios Kauno dienos',
+ 'description': 'md5:24d84534c7dc76581e59f5689462411a',
+ 'duration': 1783,
+ },
+ 'params': {
+ 'skip_download': True, # HLS download
+ },
+
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = remove_end(self._og_search_title(webpage), ' - LRT')
+ thumbnail = self._og_search_thumbnail(webpage)
+ description = self._og_search_description(webpage)
+ duration = parse_duration(self._search_regex(
+ r"'duration':\s*'([^']+)',", webpage,
+ 'duration', fatal=False, default=None))
+
+ formats = []
+ for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
+ data = json.loads(js_to_json(js))
+ if data['provider'] == 'rtmp':
+ formats.append({
+ 'format_id': 'rtmp',
+ 'ext': determine_ext(data['file']),
+ 'url': data['streamer'],
+ 'play_path': 'mp4:%s' % data['file'],
+ 'preference': -1,
+ })
+ else:
+ formats.extend(
+ self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'duration': duration,
+ }
compat_urllib_request,
determine_ext,
ExtractorError,
+ int_or_none,
)
'skip_download': True,
},
},
+ # Movieclips.com video
+ {
+ 'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/',
+ 'info_dict': {
+ 'id': 'mv-Wy7ZU',
+ 'ext': 'mp4',
+ 'title': 'My Week with Marilyn - Do You Love Me?',
+ 'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.',
+ 'uploader': 'movie_trailers',
+ 'duration': 176,
+ },
+ 'params': {
+ 'skip_download': 'requires rtmpdump',
+ }
+ }
]
def report_disclaimer(self):
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
+ video_url = None
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
- else:
+ if video_url is None:
mobj = re.search(r'<video src="([^"]+)"', webpage)
if mobj:
video_url = mobj.group(1)
video_ext = 'mp4'
- else:
- mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
- if mobj is None:
- raise ExtractorError('Unable to extract media URL')
- vardict = compat_parse_qs(mobj.group(1))
+ if video_url is None:
+ flashvars = self._search_regex(
+ r' name="flashvars" value="(.*?)"', webpage, 'flashvars',
+ default=None)
+ if flashvars:
+ vardict = compat_parse_qs(flashvars)
if 'mediaData' not in vardict:
raise ExtractorError('Unable to extract media URL')
mobj = re.search(
mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
video_ext = determine_ext(video_url)
-
- video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, 'title')
+ if video_url is None:
+ player_url = self._search_regex(
+ r"swfobject\.embedSWF\('([^']+)'",
+ webpage, 'config URL', default=None)
+ if player_url:
+ config_url = self._search_regex(
+ r'config=(.+)$', player_url, 'config URL')
+ config_doc = self._download_xml(
+ config_url, video_id,
+ note='Downloading video config')
+ smil_url = config_doc.find('.//properties').attrib['smil_file']
+ smil_doc = self._download_xml(
+ smil_url, video_id,
+ note='Downloading SMIL document')
+ base_url = smil_doc.find('./head/meta').attrib['base']
+ video_url = []
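+ # Each <video> node in the SMIL lists a system-bitrate and a play path ("<ext>:<path>") to be played against the base URL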
+ for vn in smil_doc.findall('.//video'):
+ br = int(vn.attrib['system-bitrate'])
+ play_path = vn.attrib['src']
+ video_url.append({
+ 'format_id': 'smil-%d' % br,
+ 'url': base_url,
+ 'play_path': play_path,
+ 'page_url': url,
+ 'player_url': player_url,
+ 'ext': play_path.partition(':')[0],
+ })
+
+ if video_url is None:
+ raise ExtractorError('Unsupported video type')
+
+ video_title = self._html_search_regex(
+ r'(?im)<title>(.*) - Video</title>', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
video_uploader = self._html_search_regex(
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
webpage, 'uploader nickname', fatal=False)
+ duration = int_or_none(
+ self._html_search_meta('video:duration', webpage))
+
+ age_limit = (
+ 18
+ if re.search(r'"contentRating":"restricted"', webpage)
+ else 0)
- if re.search(r'"contentRating":"restricted"', webpage) is not None:
- age_limit = 18
+ if isinstance(video_url, list):
+ formats = video_url
else:
- age_limit = 0
+ formats = [{
+ 'url': video_url,
+ 'ext': video_ext,
+ }]
+ self._sort_formats(formats)
return {
'id': video_id,
- 'url': video_url,
'description': description,
'uploader': video_uploader,
'title': video_title,
- 'thumbnail':thumbnail,
- 'ext': video_ext,
+ 'thumbnail': thumbnail,
'age_limit': age_limit,
+ 'formats': formats,
+ 'duration': duration,
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ qualities,
+ unified_strdate,
+)
+
+
+class MgoonIE(InfoExtractor):
+ _VALID_URL = r'''(?x)https?://(?:www\.)?
+ (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
+ video\.mgoon\.com)/(?P<id>[0-9]+)'''
+ _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
+ _TESTS = [
+ {
+ 'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
+ 'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
+ 'info_dict': {
+ 'id': '5582148',
+ 'uploader_id': 'hi6618',
+ 'duration': 240.419,
+ 'upload_date': '20131220',
+ 'ext': 'mp4',
+ 'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ },
+ {
+ 'url': 'http://www.mgoon.com/play/view/5582148',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://video.mgoon.com/5582148',
+ 'only_matching': True,
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ data = self._download_json(self._API_URL.format(video_id), video_id)
+
+ if data.get('errorInfo', {}).get('code') != 'NONE':
+ raise ExtractorError('%s encountered an error: %s' % (
+ self.IE_NAME, data['errorInfo']['message']), expected=True)
+
+ v_info = data['videoInfo']
+ title = v_info.get('v_title')
+ thumbnail = v_info.get('v_thumbnail')
+ duration = v_info.get('v_duration')
+ upload_date = unified_strdate(v_info.get('v_reg_date'))
+ uploader_id = data.get('userInfo', {}).get('u_alias')
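+ # v_duration appears to be reported in milliseconds, hence the division below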
+ if duration:
+ duration /= 1000.0
+
+ age_limit = None
+ if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
+ age_limit = 18
+
+ formats = []
+ get_quality = qualities(['360p', '480p', '720p', '1080p'])
+ for fmt in data['videoFiles']:
+ formats.append({
+ 'format_id': fmt['label'],
+ 'quality': get_quality(fmt['label']),
+ 'url': fmt['url'],
+ 'ext': fmt['format'],
+
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'upload_date': upload_date,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ smuggle_url,
+)
+
+
+class MinistryGridIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.ministrygrid.com/([^/?#]*/)*(?P<id>[^/#?]+)/?(?:$|[?#])'
+
+ _TEST = {
+ 'url': 'http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers',
+ 'md5': '844be0d2a1340422759c2a9101bab017',
+ 'info_dict': {
+ 'id': '3453494717001',
+ 'ext': 'mp4',
+ 'title': 'The Gospel by Numbers',
+ 'description': 'Coming soon from T4G 2014!',
+ 'uploader': 'LifeWay Christian Resources (MG)',
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ portlets_json = self._search_regex(
+ r'Liferay\.Portlet\.list=(\[.+?\])', webpage, 'portlet list')
+ portlets = json.loads(portlets_json)
+ pl_id = self._search_regex(
+ r'<!--\s*p_l_id - ([0-9]+)<br>', webpage, 'p_l_id')
+
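+ # Render each Liferay portlet in turn until one of them yields the video iframe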
+ for i, portlet in enumerate(portlets):
+ portlet_url = 'http://www.ministrygrid.com/c/portal/render_portlet?p_l_id=%s&p_p_id=%s' % (pl_id, portlet)
+ portlet_code = self._download_webpage(
+ portlet_url, video_id,
+ note='Looking in portlet %s (%d/%d)' % (portlet, i + 1, len(portlets)),
+ fatal=False)
+ video_iframe_url = self._search_regex(
+ r'<iframe.*?src="([^"]+)"', portlet_code, 'video iframe',
+ default=None)
+ if video_iframe_url:
+ surl = smuggle_url(
+ video_iframe_url, {'force_videoid': video_id})
+ return {
+ '_type': 'url',
+ 'id': video_id,
+ 'url': surl,
+ }
+
+ raise ExtractorError('Could not find video iframe in any portlets')
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urlparse,
+ get_element_by_attribute,
+ parse_duration,
+ strip_jsonp,
+)
+
+
+class MiTeleIE(InfoExtractor):
+ IE_NAME = 'mitele.es'
+ _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<episode>[^/]+)/'
+
+ _TEST = {
+ 'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
+ 'md5': '6a75fe9d0d3275bead0cb683c616fddb',
+ 'info_dict': {
+ 'id': '0fce117d',
+ 'ext': 'mp4',
+ 'title': 'Programa 144 - Tor, la web invisible',
+ 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+ 'display_id': 'programa-144',
+ 'duration': 2913,
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ episode = mobj.group('episode')
+ webpage = self._download_webpage(url, episode)
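+ # The embed data is a JS object literal written with single quotes, so swap them before json.loads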
+ embed_data_json = self._search_regex(
+ r'MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
+ flags=re.DOTALL
+ ).replace('\'', '"')
+ embed_data = json.loads(embed_data_json)
+
+ domain = embed_data['mediaUrl']
+ if not domain.startswith('http'):
+ # only happens in telecinco.es videos
+ domain = 'http://' + domain
+ info_url = compat_urlparse.urljoin(
+ domain,
+ compat_urllib_parse.unquote(embed_data['flashvars']['host'])
+ )
+ info_el = self._download_xml(info_url, episode).find('./video/info')
+
+ video_link = info_el.find('videoUrl/link').text
+ token_query = compat_urllib_parse.urlencode({'id': video_link})
+ token_info = self._download_json(
+ embed_data['flashvars']['ov_tk'] + '?' + token_query,
+ episode,
+ transform_source=strip_jsonp
+ )
+
+ return {
+ 'id': embed_data['videoId'],
+ 'display_id': episode,
+ 'title': info_el.find('title').text,
+ 'url': token_info['tokenizedUrl'],
+ 'description': get_element_by_attribute('class', 'text', webpage),
+ 'thumbnail': info_el.find('thumb').text,
+ 'duration': parse_duration(info_el.find('duration').text),
+ }
from ..utils import (
compat_urllib_parse,
ExtractorError,
+ HEADRequest,
int_or_none,
parse_iso8601,
)
},
}
- def check_urls(self, url_list):
- """Returns 1st active url from list"""
- for url in url_list:
+ def _get_url(self, track_id, template_url):
+ server_count = 30
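+ # Probe each candidate stream server with a HEAD request and return the first URL that responds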
+ for i in range(server_count):
+ url = template_url % i
try:
# We only want to know if the request succeed
# don't download the whole file
- self._request_webpage(url, None, False)
+ self._request_webpage(
+ HEADRequest(url), track_id,
+ 'Checking URL %d/%d ...' % (i + 1, server_count))
return url
except ExtractorError:
- url = None
+ pass
return None
- def _get_url(self, template_url):
- return self.check_urls(template_url % i for i in range(30))
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group(1)
r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url')
song_url = preview_url.replace('/previews/', '/c/originals/')
template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
- final_song_url = self._get_url(template_url)
+ final_song_url = self._get_url(track_id, template_url)
if final_song_url is None:
self.to_screen('Trying with m4a extension')
template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
- final_song_url = self._get_url(template_url)
+ final_song_url = self._get_url(track_id, template_url)
if final_song_url is None:
raise ExtractorError('Unable to extract track url')
PREFIX = (
- r'<div class="cloudcast-play-button-container"'
+ r'<div class="cloudcast-play-button-container[^"]*?"'
r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
title = self._html_search_regex(
PREFIX + r'm-title="([^"]+)"', webpage, 'title')
from ..utils import (
parse_duration,
parse_iso8601,
- find_xpath_attr,
)
class MLBIE(InfoExtractor):
- _VALID_URL = r'https?://m\.mlb\.com/(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v(?P<id>n?\d+)'
+ _VALID_URL = r'https?://m\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|shared/video/embed/embed\.html\?.*?\bcontent_id=)(?P<id>n?\d+)'
_TESTS = [
{
'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
+ {
+ 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
duration = parse_duration(detail.find('./duration').text)
timestamp = parse_iso8601(detail.attrib['date'][:-5])
- thumbnail = find_xpath_attr(
- detail, './thumbnailScenarios/thumbnailScenario', 'type', '45').text
+ thumbnails = [{
+ 'url': thumbnail.text,
+ } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
formats = []
for media_url in detail.findall('./url'):
'duration': duration,
'timestamp': timestamp,
'formats': formats,
- 'thumbnail': thumbnail,
+ 'thumbnails': thumbnails,
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_urllib_parse,
+ compat_urllib_request,
+ int_or_none,
+)
+
+
+class MoeVideoIE(InfoExtractor):
+ IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
+ _VALID_URL = r'''(?x)
+ https?://(?P<host>(?:www\.)?
+ (?:(?:moevideo|playreplay|videochart)\.net))/
+ (?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
+ _API_URL = 'http://api.letitbit.net/'
+ _API_KEY = 'tVL0gjqo5'
+ _TESTS = [
+ {
+ 'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
+ 'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
+ 'info_dict': {
+ 'id': '00297.0036103fe3d513ef27915216fd29',
+ 'ext': 'flv',
+ 'title': 'Sink cut out machine',
+ 'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'width': 540,
+ 'height': 360,
+ 'duration': 179,
+ 'filesize': 17822500,
+ }
+ },
+ {
+ 'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
+ 'md5': '74f0a014d5b661f0f0e2361300d1620e',
+ 'info_dict': {
+ 'id': '77107.7f325710a627383d40540d8e991a',
+ 'ext': 'flv',
+ 'title': 'Operacion Condor.',
+ 'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'width': 480,
+ 'height': 296,
+ 'duration': 6027,
+ 'filesize': 588257923,
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(
+ 'http://%s/video/%s' % (mobj.group('host'), video_id),
+ video_id, 'Downloading webpage')
+
+ title = self._og_search_title(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+ description = self._og_search_description(webpage)
+
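+ # The LetitBit API expects a JSON-encoded [api_key, [method, params]] request posted as the form field "r"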
+ r = [
+ self._API_KEY,
+ [
+ 'preview/flv_link',
+ {
+ 'uid': video_id,
+ },
+ ],
+ ]
+ r_json = json.dumps(r)
+ post = compat_urllib_parse.urlencode({'r': r_json})
+ req = compat_urllib_request.Request(self._API_URL, post)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+ response = self._download_json(req, video_id)
+ if response['status'] != 'OK':
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, response['data']),
+ expected=True
+ )
+ item = response['data'][0]
+ video_url = item['link']
+ duration = int_or_none(item['length'])
+ width = int_or_none(item['width'])
+ height = int_or_none(item['height'])
+ filesize = int_or_none(item['convert_size'])
+
+ formats = [{
+ 'format_id': 'sd',
+ 'http_headers': {'Range': 'bytes=0-'}, # Required to download
+ 'url': video_url,
+ 'width': width,
+ 'height': height,
+ 'filesize': filesize,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'duration': duration,
+ 'formats': formats,
+ }
+from __future__ import unicode_literals
+
import os
import re
compat_urllib_parse,
)
+
class MofosexIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
+ _VALID_URL = r'^https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<videoid>[0-9]+)/.*?\.html)'
_TEST = {
- u'url': u'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
- u'file': u'5018.mp4',
- u'md5': u'1b2eb47ac33cc75d4a80e3026b613c5a',
- u'info_dict': {
- u"title": u"Japanese Teen Music Video",
- u"age_limit": 18,
+ 'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html',
+ 'md5': '1b2eb47ac33cc75d4a80e3026b613c5a',
+ 'info_dict': {
+ 'id': '5018',
+ 'ext': 'mp4',
+ 'title': 'Japanese Teen Music Video',
+ 'age_limit': 18,
}
}
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
- video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, u'title')
- video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, u'video_url'))
+ video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, 'title')
+ video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ parse_duration,
+)
+
+
+class MojvideoIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)'
+ _TEST = {
+ 'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906',
+ 'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7',
+ 'info_dict': {
+ 'id': '3d1ed4497707730b2906',
+ 'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic',
+ 'ext': 'mp4',
+ 'title': 'V avtu pred mano rdečelaska - Alfi Nipič',
+ 'thumbnail': 're:^http://.*\.jpg$',
+ 'duration': 242,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ # XML is malformed
+ playerapi = self._download_webpage(
+ 'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id)
+
+ if '<error>true</error>' in playerapi:
+ error_desc = self._html_search_regex(
+ r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False)
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True)
+
+ title = self._html_search_regex(
+ r'<title>([^<]+)</title>', playerapi, 'title')
+ video_url = self._html_search_regex(
+ r'<file>([^<]+)</file>', playerapi, 'video URL')
+ thumbnail = self._html_search_regex(
+ r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False)
+ duration = parse_duration(self._html_search_regex(
+ r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': video_url,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ }
\ No newline at end of file
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import os.path
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+
+
+class MonikerIE(InfoExtractor):
+ IE_DESC = 'allmyvideos.net and vidspot.net'
+ _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'
+
+ _TESTS = [{
+ 'url': 'http://allmyvideos.net/jih3nce3x6wn',
+ 'md5': '710883dee1bfc370ecf9fa6a89307c88',
+ 'info_dict': {
+ 'id': 'jih3nce3x6wn',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video',
+ },
+ }, {
+ 'url': 'http://vidspot.net/l2ngsmhs8ci5',
+ 'md5': '710883dee1bfc370ecf9fa6a89307c88',
+ 'info_dict': {
+ 'id': 'l2ngsmhs8ci5',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video',
+ },
+ }, {
+ 'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ orig_webpage = self._download_webpage(url, video_id)
+ fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
+ data = dict(fields)
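+ # Re-submit the page's hidden form fields to obtain the page that embeds the actual file links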
+
+ post = compat_urllib_parse.urlencode(data)
+ headers = {
+ b'Content-Type': b'application/x-www-form-urlencoded',
+ }
+ req = compat_urllib_request.Request(url, post, headers)
+ webpage = self._download_webpage(
+ req, video_id, note='Downloading video page ...')
+
+ title = os.path.splitext(data['fname'])[0]
+
+ # Could be several links with different quality
+ links = re.findall(r'"file" : "?(.+?)",', webpage)
+ # Assume the links are ordered in quality
+ formats = [{
+ 'url': l,
+ 'quality': i,
+ } for i, l in enumerate(links)]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ }
from .common import InfoExtractor
from ..utils import (
- int_or_none,
+ str_to_int,
unified_strdate,
)
class MotherlessIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)'
+ _VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
_TESTS = [
{
'url': 'http://motherless.com/AC3FFE1',
- 'md5': '5527fef81d2e529215dad3c2d744a7d9',
+ 'md5': '310f62e325a9fafe64f68c0bccb6e75f',
'info_dict': {
'id': 'AC3FFE1',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Fucked in the ass while playing PS3',
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
'upload_date': '20100913',
'thumbnail': 're:http://.*\.jpg',
'age_limit': 18,
}
+ },
+ {
+ 'url': 'http://motherless.com/g/cosplay/633979F',
+ 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
+ 'info_dict': {
+ 'id': '633979F',
+ 'ext': 'mp4',
+ 'title': 'Turtlette',
+ 'categories': ['superheroine heroine superher'],
+ 'upload_date': '20140827',
+ 'uploader_id': 'shade0230',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'age_limit': 18,
+ }
}
]
- def _real_extract(self,url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- title = self._html_search_regex(r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
-
- video_url = self._html_search_regex(r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video_url')
+ title = self._html_search_regex(
+ r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+ video_url = self._html_search_regex(
+ r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL')
age_limit = self._rta_search(webpage)
-
- view_count = self._html_search_regex(r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view_count')
+ view_count = str_to_int(self._html_search_regex(
+ r'<strong>Views</strong>\s+([^<]+)<',
+ webpage, 'view count', fatal=False))
+ like_count = str_to_int(self._html_search_regex(
+ r'<strong>Favorited</strong>\s+([^<]+)<',
+ webpage, 'like count', fatal=False))
- upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload_date')
+ upload_date = self._html_search_regex(
+ r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
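+ # Recent uploads are shown as "N days Ago"; convert those to an absolute YYYYMMDD date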
if 'Ago' in upload_date:
days = int(re.search(r'([0-9]+)', upload_date).group(1))
upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
else:
upload_date = unified_strdate(upload_date)
- like_count = self._html_search_regex(r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like_count')
-
comment_count = webpage.count('class="media-comment-contents"')
- uploader_id = self._html_search_regex(r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id')
+ uploader_id = self._html_search_regex(
+ r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
+ webpage, 'uploader_id')
categories = self._html_search_meta('keywords', webpage)
if categories:
'uploader_id': uploader_id,
'thumbnail': self._og_search_thumbnail(webpage),
'categories': categories,
- 'view_count': int_or_none(view_count.replace(',', '')),
- 'like_count': int_or_none(like_count.replace(',', '')),
+ 'view_count': view_count,
+ 'like_count': like_count,
'comment_count': comment_count,
'age_limit': age_limit,
'url': video_url,
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_str,
+ clean_html,
+)
+
+
+class MovieClipsIE(InfoExtractor):
+ _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
+ _TEST = {
+ 'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
+ 'info_dict': {
+ 'id': 'Wy7ZU',
+ 'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
+ 'ext': 'mp4',
+ 'title': 'My Week with Marilyn - Do You Love Me?',
+ 'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+ show_id = display_id or video_id
+
+ config = self._download_xml(
+ 'http://config.movieclips.com/player/config/%s' % video_id,
+ show_id, 'Downloading player config')
+
+ if config.find('./country-region').text == 'false':
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
+
+ properties = config.find('./video/properties')
+ smil_file = properties.attrib['smil_file']
+
+ smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
+ base_url = smil.find('./head/meta').attrib['base']
+
+ formats = []
+ for video in smil.findall('./body/switch/video'):
+ vbr = int(video.attrib['system-bitrate']) / 1000
+ src = video.attrib['src']
+ formats.append({
+ 'url': base_url,
+ 'play_path': src,
+ 'ext': src.split(':')[0],
+ 'vbr': vbr,
+ 'format_id': '%dk' % vbr,
+ })
+
+ self._sort_formats(formats)
+
+ title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
+ description = clean_html(compat_str(properties.attrib['clip_description']))
+ thumbnail = properties.attrib['image']
+ categories = properties.attrib['clip_categories'].split(',')
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'formats': formats,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ int_or_none,
+ js_to_json,
+)
+
+
+class MuenchenTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream'
+ IE_DESC = 'münchen.tv'
+ _TEST = {
+ 'url': 'http://www.muenchen.tv/livestream/',
+ 'info_dict': {
+ 'id': '5334',
+ 'display_id': 'live',
+ 'ext': 'mp4',
+ 'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'is_live': True,
+ 'thumbnail': 're:^https?://.*\.jpg$'
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }
+
+ def _real_extract(self, url):
+ display_id = 'live'
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._live_title(self._og_search_title(webpage))
+
+ data_js = self._search_regex(
+ r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
+ webpage, 'playlist configuration')
+ data_json = js_to_json(data_js)
+ data = json.loads(data_json)[0]
+
+ video_id = data['mediaid']
+ thumbnail = data.get('image')
+
+ formats = []
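+ # One format per playlist source; entries pointing at a .smil manifest get a strongly negative preference below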
+ for format_num, s in enumerate(data['sources']):
+ ext = determine_ext(s['file'], None)
+ label_str = s.get('label')
+ if label_str is None:
+ label_str = '_%d' % format_num
+
+ if ext is None:
+ format_id = label_str
+ else:
+ format_id = '%s-%s' % (ext, label_str)
+
+ formats.append({
+ 'url': s['file'],
+ 'tbr': int_or_none(s.get('label')),
+ 'ext': 'mp4',
+ 'format_id': format_id,
+ 'preference': -100 if '.smil' in s['file'] else 0,
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'formats': formats,
+ 'is_live': True,
+ 'thumbnail': thumbnail,
+ }
+
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ unified_strdate,
+)
+
+
+class MusicVaultIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
+ _TEST = {
+ 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
+ 'md5': '2cdbb3ae75f7fb3519821507d2fb3c15',
+ 'info_dict': {
+ 'id': '1010863',
+ 'ext': 'mp4',
+ 'uploader_id': 'the-allman-brothers-band',
+ 'title': 'Straight from the Heart',
+ 'duration': 244,
+ 'uploader': 'The Allman Brothers Band',
+ 'thumbnail': 're:^https?://.*/thumbnail/.*',
+ 'upload_date': '19811216',
+ 'location': 'Capitol Theatre (Passaic, NJ)',
+ 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('display_id')
+ webpage = self._download_webpage(url, display_id)
+
+ thumbnail = self._search_regex(
+ r'<meta itemprop="thumbnail" content="([^"]+)"',
+ webpage, 'thumbnail', fatal=False)
+
+ data_div = self._search_regex(
+ r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
+ uploader = self._html_search_regex(
+ r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
+ title = self._html_search_regex(
+ r'<h2.*?>(.*?)</h2>', data_div, 'title')
+ upload_date = unified_strdate(self._html_search_regex(
+ r'<h3.*?>(.*?)</h3>', data_div, 'uploader', fatal=False))
+ location = self._html_search_regex(
+ r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
+
+ duration = parse_duration(self._html_search_meta('duration', webpage))
+
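+ # Build a direct Kaltura playManifest URL from the uid, wid and entry id scraped out of the page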
+ VIDEO_URL_TEMPLATE = 'http://cdnapi.kaltura.com/p/%(uid)s/sp/%(wid)s/playManifest/entryId/%(entry_id)s/format/url/protocol/http'
+ kaltura_id = self._search_regex(
+ r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
+ webpage, 'kaltura ID')
+ video_url = VIDEO_URL_TEMPLATE % {
+ 'entry_id': kaltura_id,
+ 'wid': self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid'),
+ 'uid': self._search_regex(r'uiconf_id/([0-9]+)/', webpage, 'uid'),
+ }
+
+ return {
+ 'id': mobj.group('id'),
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'display_id': display_id,
+ 'uploader_id': mobj.group('uploader_id'),
+ 'thumbnail': thumbnail,
+ 'description': self._html_search_meta('description', webpage),
+ 'upload_date': upload_date,
+ 'location': location,
+ 'title': title,
+ 'uploader': uploader,
+ 'duration': duration,
+ }
from ..utils import (
compat_urllib_parse,
ExtractorError,
+ clean_html,
)
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
+ m_error = re.search(
+ r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+ webpage)
+ if m_error:
+ raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
import re
from .common import InfoExtractor
+from ..utils import (
+ remove_end,
+ parse_duration,
+)
class NBAIE(InfoExtractor):
_VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
_TEST = {
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
- 'md5': u'c0edcfc37607344e2ff8f13c378c88a4',
+ 'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
'info_dict': {
'id': '0021200253-okc-bkn-recap.nba',
'ext': 'mp4',
- 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
'title': 'Thunder vs. Nets',
+ 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
+ 'duration': 181,
},
}
video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
shortened_video_id = video_id.rpartition('/')[2]
- title = self._og_search_title(webpage, default=shortened_video_id).replace('NBA.com: ', '')
+ title = remove_end(
+ self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
+
+ description = self._og_search_description(webpage)
+ duration = parse_duration(
+ self._html_search_meta('duration', webpage, 'duration', fatal=False))
- description = self._html_search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)
return {
'id': shortened_video_id,
'url': video_url,
'title': title,
'description': description,
+ 'duration': duration,
}
_TEST = {
'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
- 'md5': '54d0fbc33e0b853a65d7b4de5c06d64e',
+ # md5 checksum is not stable
'info_dict': {
- 'id': 'u1RInQZRN7QJ',
+ 'id': 'bTmnLCvIbaaH',
'ext': 'flv',
'title': 'I Am a Firefighter',
'description': 'An emergency puts Dawson\'sf irefighter skills to the ultimate test in this four-part digital series.',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
theplatform_url = self._search_regex('class="video-player video-player-full" data-mpx-url="(.*?)"', webpage, 'theplatform url')
if theplatform_url.startswith('//'):
'md5': 'b2421750c9f260783721d898f4c42063',
'info_dict': {
'id': 'I1wpAI_zmhsQ',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'How Twitter Reacted To The Snowden Interview',
'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
},
]
for base_url in base_urls:
+ if not base_url:
+ continue
playlist_url = base_url + '?form=MPXNBCNewsAPI'
all_videos = self._download_json(playlist_url, title)['videos']
_TESTS = [
{
- 'url': 'http://www.ndr.de/fernsehen/media/dienordreportage325.html',
- 'md5': '4a4eeafd17c3058b65f0c8f091355855',
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
+ 'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
'note': 'Video file',
'info_dict': {
- 'id': '325',
+ 'id': '25866',
'ext': 'mp4',
- 'title': 'Blaue Bohnen aus Blocken',
- 'description': 'md5:190d71ba2ccddc805ed01547718963bc',
- 'duration': 1715,
- },
+ 'title': 'Kartoffeltage in der Lewitz',
+ 'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
+ 'duration': 166,
+ }
},
{
'url': 'http://www.ndr.de/info/audio51535.html',
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_urllib_parse_urlparse,
+ int_or_none,
+ remove_end,
+)
+
+
+class NFLIE(InfoExtractor):
+ IE_NAME = 'nfl.com'
+ _VALID_URL = r'''(?x)https?://
+ (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
+ (?:.+?/)*
+ (?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
+ _TESTS = [
+ {
+ 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
+ 'md5': '394ef771ddcd1354f665b471d78ec4c6',
+ 'info_dict': {
+ 'id': '0ap3000000398478',
+ 'ext': 'mp4',
+ 'title': 'Week 3: Redskins vs. Eagles highlights',
+ 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
+ 'upload_date': '20140921',
+ 'timestamp': 1411337580,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ },
+ {
+ 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
+ 'info_dict': {
+ 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'ext': 'mp4',
+ 'title': 'LIVE: Post Game vs. Browns',
+ 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
+ 'upload_date': '20131229',
+ 'timestamp': 1388354455,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+ ]
+
+ @staticmethod
+ def prepend_host(host, url):
+ if not url.startswith('http'):
+ if not url.startswith('/'):
+ url = '/%s' % url
+ url = 'http://{0:}{1:}'.format(host, url)
+ return url
+
+ @staticmethod
+ def format_from_stream(stream, protocol, host, path_prefix='',
+ preference=0, note=None):
+ url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
+ protocol=protocol,
+ host=host,
+ prefix=path_prefix,
+ path=stream.get('path'),
+ )
+ return {
+ 'url': url,
+ 'vbr': int_or_none(stream.get('rate', 0), 1000),
+ 'preference': preference,
+ 'format_note': note,
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id, host = mobj.group('id'), mobj.group('host')
+
+ webpage = self._download_webpage(url, video_id)
+
+ config_url = NFLIE.prepend_host(host, self._search_regex(
+ r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL'))
+ config = self._download_json(config_url, video_id,
+ note='Downloading player config')
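+ # The player config exposes a contentURLTemplate that resolves to per-video JSON metadata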
+ url_template = NFLIE.prepend_host(
+ host, '{contentURLTemplate:}'.format(**config))
+ video_data = self._download_json(
+ url_template.format(id=video_id), video_id)
+
+ formats = []
+ cdn_data = video_data.get('cdnData', {})
+ streams = cdn_data.get('bitrateInfo', [])
+ if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
+ parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
+ protocol, host = parts.scheme, parts.netloc
+ for stream in streams:
+ formats.append(
+ NFLIE.format_from_stream(stream, protocol, host))
+ else:
+ cdns = config.get('cdns')
+ if not cdns:
+ raise ExtractorError('Failed to get CDN data', expected=True)
+
+ for name, cdn in cdns.items():
+ # LimeLight streams don't seem to work
+ if cdn.get('name') == 'LIMELIGHT':
+ continue
+
+ protocol = cdn.get('protocol')
+ host = remove_end(cdn.get('host', ''), '/')
+ if not (protocol and host):
+ continue
+
+ prefix = cdn.get('pathprefix', '')
+ if prefix and not prefix.endswith('/'):
+ prefix = '%s/' % prefix
+
+ preference = 0
+ if protocol == 'rtmp':
+ preference = -2
+ elif 'prog' in name.lower():
+ preference = 1
+
+ for stream in streams:
+ formats.append(
+ NFLIE.format_from_stream(stream, protocol, host,
+ prefix, preference, name))
+
+ self._sort_formats(formats)
+
+ thumbnail = None
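+ # Pick the largest available thumbnail, trying sizes from extra-large down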
+ for q in ('xl', 'l', 'm', 's', 'xs'):
+ thumbnail = video_data.get('imagePaths', {}).get(q)
+ if thumbnail:
+ break
+
+ return {
+ 'id': video_id,
+ 'title': video_data.get('headline'),
+ 'formats': formats,
+ 'description': video_data.get('caption'),
+ 'duration': video_data.get('duration'),
+ 'thumbnail': thumbnail,
+ 'timestamp': int_or_none(video_data.get('posted'), 1000),
+ }
+from __future__ import unicode_literals
+
import re
import json
from ..utils import (
compat_urlparse,
compat_urllib_parse,
- determine_ext,
unified_strdate,
)
self.report_extraction(video_id)
initial_video_url = info['publishPoint']
- data = compat_urllib_parse.urlencode({
- 'type': 'fvod',
- 'path': initial_video_url.replace('.mp4', '_sd.mp4'),
- })
- path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
- path_doc = self._download_xml(path_url, video_id,
- u'Downloading final video url')
- video_url = path_doc.find('path').text
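+ # When info['formats'] is '1', the final URL has to be resolved via the encryptvideopath servlet; otherwise publishPoint is directly playable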
+ if info['formats'] == '1':
+ data = compat_urllib_parse.urlencode({
+ 'type': 'fvod',
+ 'path': initial_video_url.replace('.mp4', '_sd.mp4'),
+ })
+ path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
+ path_doc = self._download_xml(
+ path_url, video_id, 'Downloading final video url')
+ video_url = path_doc.find('path').text
+ else:
+ video_url = initial_video_url
join = compat_urlparse.urljoin
return {
'id': video_id,
'title': info['name'],
'url': video_url,
- 'ext': determine_ext(video_url),
'description': info['description'],
'duration': int(info['duration']),
'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
class NHLIE(NHLBaseInfoExtractor):
- IE_NAME = u'nhl.com'
- _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console\?.*?(?<=[?&])id=(?P<id>\d+)'
-
- _TEST = {
- u'url': u'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
- u'file': u'453614.mp4',
- u'info_dict': {
- u'title': u'Quick clip: Weise 4-3 goal vs Flames',
- u'description': u'Dale Weise scores his first of the season to put the Canucks up 4-3.',
- u'duration': 18,
- u'upload_date': u'20131006',
+ IE_NAME = 'nhl.com'
+ _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'
+
+ _TESTS = [{
+ 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
+ 'md5': 'db704a4ea09e8d3988c85e36cc892d09',
+ 'info_dict': {
+ 'id': '453614',
+ 'ext': 'mp4',
+ 'title': 'Quick clip: Weise 4-3 goal vs Flames',
+ 'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
+ 'duration': 18,
+ 'upload_date': '20131006',
},
- }
+ }, {
+ 'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
+ 'md5': 'd22e82bc592f52d37d24b03531ee9696',
+ 'info_dict': {
+ 'id': '2014020024-628-h',
+ 'ext': 'mp4',
+ 'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
+ 'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
+ 'duration': 0,
+ 'upload_date': '20141011',
+ },
+ }, {
+ 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
- info_json = self._download_webpage(json_url, video_id,
- u'Downloading info json')
- info_json = self._fix_json(info_json)
- info = json.loads(info_json)[0]
- return self._extract_video(info)
+ data = self._download_json(
+ json_url, video_id, transform_source=self._fix_json)
+ return self._extract_video(data[0])
class NHLVideocenterIE(NHLBaseInfoExtractor):
- IE_NAME = u'nhl.com:videocenter'
- IE_DESC = u'NHL videocenter category'
- _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[^&]+))?'
-
- @classmethod
- def suitable(cls, url):
- if NHLIE.suitable(url):
- return False
- return super(NHLVideocenterIE, cls).suitable(url)
+ IE_NAME = 'nhl.com:videocenter'
+ IE_DESC = 'NHL videocenter category'
+ _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
+ _TEST = {
+ 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
+ 'info_dict': {
+ 'id': '999',
+ 'title': 'Highlights',
+ },
+ 'playlist_count': 12,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
cat_id = self._search_regex(
[r'var defaultCatId = "(.+?)";',
r'{statusIndex:0,index:0,.*?id:(.*?),'],
- webpage, u'category id')
+ webpage, 'category id')
playlist_title = self._html_search_regex(
r'tab0"[^>]*?>(.*?)</td>',
- webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()
+ webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
data = compat_urllib_parse.urlencode({
'cid': cat_id,
response = self._fix_json(response)
if not response.strip():
self._downloader.report_warning(u'Got an empty response, trying '
- u'adding the "newvideos" parameter')
+ 'adding the "newvideos" parameter')
response = self._download_webpage(request_url + '&newvideos=true',
playlist_title)
response = self._fix_json(response)
'_type': 'playlist',
'title': playlist_title,
'id': cat_id,
- 'entries': [self._extract_video(i) for i in videos],
+ 'entries': [self._extract_video(v) for v in videos],
}
from __future__ import unicode_literals
import re
+import json
from .common import InfoExtractor
from ..utils import (
_VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
_NETRC_MACHINE = 'niconico'
- # Determine whether the downloader uses authentication to download video
- _AUTHENTICATE = False
+ # Determine whether the downloader used authentication to download video
+ _AUTHENTICATED = False
def _real_initialize(self):
- if self._downloader.params.get('username', None) is not None:
- self._AUTHENTICATE = True
-
- if self._AUTHENTICATE:
- self._login()
+ self._login()
def _login(self):
(username, password) = self._get_login_info()
+ # No authentication to be performed
+ if not username:
+ return True
# Log in
login_form_strs = {
if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
+ # Successful login
+ self._AUTHENTICATED = True
return True
def _real_extract(self, url):
'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
note='Downloading video info page')
- if self._AUTHENTICATE:
+ if self._AUTHENTICATED:
# Get flv info
flv_info_webpage = self._download_webpage(
'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
'duration': duration,
'webpage_url': webpage_url,
}
+
+
+class NiconicoPlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
+
+ _TEST = {
+ 'url': 'http://www.nicovideo.jp/mylist/27411728',
+ 'info_dict': {
+ 'id': '27411728',
+ 'title': 'AKB48のオールナイトニッポン',
+ },
+ 'playlist_mincount': 225,
+ }
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+ webpage = self._download_webpage(url, list_id)
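+ # The mylist entries are preloaded into the page as a JSON array passed to Mylist.preload()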
+
+ entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
+ webpage, 'entries')
+ entries = json.loads(entries_json)
+ entries = [{
+ '_type': 'url',
+ 'ie_key': NiconicoIE.ie_key(),
+ 'url': 'http://www.nicovideo.jp/watch/%s' % entry['item_id'],
+ } for entry in entries]
+
+ return {
+ '_type': 'playlist',
+ 'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
+ 'id': list_id,
+ 'entries': entries,
+ }
from __future__ import unicode_literals
import re
+import time
+import hashlib
from .common import InfoExtractor
from ..utils import (
+ compat_urllib_request,
+ compat_urllib_parse,
ExtractorError,
+ clean_html,
unified_strdate,
compat_str,
)
class NocoIE(InfoExtractor):
_VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
+ _LOGIN_URL = 'http://noco.tv/do.php'
+ _API_URL_TEMPLATE = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s'
+ _NETRC_MACHINE = 'noco'
_TEST = {
'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/',
'skip': 'Requires noco account',
}
+ def _real_initialize(self):
+ self._login()
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+
+ login_form = {
+ 'a': 'login',
+ 'cookie': '1',
+ 'username': username,
+ 'password': password,
+ }
+ request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
+
+ login = self._download_json(request, None, 'Logging in as %s' % username)
+
+ if 'erreur' in login:
+ raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)
+
+ def _call_api(self, path, video_id, note):
+ ts = compat_str(int(time.time() * 1000))
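+ # tk is the md5 of the md5-hexdigest of the millisecond timestamp concatenated with a fixed salt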
+ tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
+ url = self._API_URL_TEMPLATE % (path, ts, tk)
+
+ resp = self._download_json(url, video_id, note)
+
+ if isinstance(resp, dict) and resp.get('error'):
+ self._raise_error(resp['error'], resp['description'])
+
+ return resp
+
+ def _raise_error(self, error, description):
+ raise ExtractorError(
+ '%s returned error: %s - %s' % (self.IE_NAME, error, description),
+ expected=True)
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- medias = self._download_json(
- 'https://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')
+ medias = self._call_api(
+ 'shows/%s/medias' % video_id,
+ video_id, 'Downloading video JSON')
+
+ qualities = self._call_api(
+ 'qualities',
+ video_id, 'Downloading qualities JSON')
formats = []
- for fmt in medias['fr']['video_list']['default']['quality_list']:
- format_id = fmt['quality_key']
+ for format_id, fmt in medias['fr']['video_list']['none']['quality_list'].items():
- file = self._download_json(
- 'https://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
+ video = self._call_api(
+ 'shows/%s/video/%s/fr' % (video_id, format_id.lower()),
video_id, 'Downloading %s video JSON' % format_id)
- file_url = file['file']
+ file_url = video['file']
if not file_url:
continue
- if file_url == 'forbidden':
- raise ExtractorError(
- '%s returned error: %s - %s' % (
- self.IE_NAME, file['popmessage']['title'], file['popmessage']['message']),
- expected=True)
+ if file_url in ['forbidden', 'not found']:
+ popmessage = video['popmessage']
+ self._raise_error(popmessage['title'], popmessage['message'])
formats.append({
'url': file_url,
'abr': fmt['audiobitrate'],
'vbr': fmt['videobitrate'],
'filesize': fmt['filesize'],
- 'format_note': fmt['quality_name'],
- 'preference': fmt['priority'],
+ 'format_note': qualities[format_id]['quality_name'],
+ 'preference': qualities[format_id]['priority'],
})
self._sort_formats(formats)
- show = self._download_json(
- 'https://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]
+ show = self._call_api(
+ 'shows/by_id/%s' % video_id,
+ video_id, 'Downloading show JSON')[0]
- upload_date = unified_strdate(show['indexed'])
+ upload_date = unified_strdate(show['online_date_start_utc'])
uploader = show['partner_name']
uploader_id = show['partner_key']
duration = show['duration_ms'] / 1000.0
- thumbnail = show['screenshot']
+
+ thumbnails = []
+ for thumbnail_key, thumbnail_url in show.items():
+ m = re.search(r'^screenshot_(?P<width>\d+)x(?P<height>\d+)$', thumbnail_key)
+ if not m:
+ continue
+ thumbnails.append({
+ 'url': thumbnail_url,
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
episode = show.get('show_TT') or show.get('show_OT')
family = show.get('family_TT') or show.get('family_OT')
'id': video_id,
'title': title,
'description': description,
- 'thumbnail': thumbnail,
+ 'thumbnails': thumbnails,
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_urllib_request,
+ urlencode_postdata,
+ xpath_text,
+ xpath_with_ns,
+)
+
+_x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'})
+
+
+class NosVideoIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?nosvideo\.com/' + \
+ '(?:embed/|\?v=)(?P<id>[A-Za-z0-9]{12})/?'
+ _PLAYLIST_URL = 'http://nosvideo.com/xml/{xml_id:s}.xml'
+ _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
+ _TEST = {
+ 'url': 'http://nosvideo.com/?v=mu8fle7g7rpq',
+ 'md5': '6124ed47130d8be3eacae635b071e6b6',
+ 'info_dict': {
+ 'id': 'mu8fle7g7rpq',
+ 'ext': 'mp4',
+ 'title': 'big_buck_bunny_480p_surround-fix.avi.mp4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
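+ # Submit the "Continue to Video" form, then read the stream data from the referenced XSPF playlist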
+ fields = {
+ 'id': video_id,
+ 'op': 'download1',
+ 'method_free': 'Continue to Video',
+ }
+ req = compat_urllib_request.Request(url, urlencode_postdata(fields))
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+ webpage = self._download_webpage(req, video_id,
+ 'Downloading download page')
+ if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+ raise ExtractorError('Video %s does not exist' % video_id,
+ expected=True)
+
+ xml_id = self._search_regex(r'php\|([^\|]+)\|', webpage, 'XML ID')
+ playlist_url = self._PLAYLIST_URL.format(xml_id=xml_id)
+ playlist = self._download_xml(playlist_url, video_id)
+
+ track = playlist.find(_x('.//xspf:track'))
+ if track is None:
+ raise ExtractorError(
+ 'XML playlist is missing the \'track\' element',
+ expected=True)
+ title = xpath_text(track, _x('./xspf:title'), 'title')
+ url = xpath_text(track, _x('./xspf:file'), 'URL', fatal=True)
+ thumbnail = xpath_text(track, _x('./xspf:image'), 'thumbnail')
+ if title is not None:
+ title = title.strip()
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
+# encoding: utf-8
from __future__ import unicode_literals
import re
class NownessIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
-
- _TEST = {
- 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
- 'md5': '068bc0202558c2e391924cb8cc470676',
- 'info_dict': {
- 'id': '2520295746001',
- 'ext': 'mp4',
- 'description': 'Candor: The Art of Gesticulation',
- 'uploader': 'Nowness',
- 'title': 'Candor: The Art of Gesticulation',
- }
- }
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
+
+ _TESTS = [
+ {
+ 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
+ 'md5': '068bc0202558c2e391924cb8cc470676',
+ 'info_dict': {
+ 'id': '2520295746001',
+ 'ext': 'mp4',
+ 'title': 'Candor: The Art of Gesticulation',
+ 'description': 'Candor: The Art of Gesticulation',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
+ }
+ },
+ {
+ 'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr',
+ 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
+ 'info_dict': {
+ 'id': '3716354522001',
+ 'ext': 'mp4',
+ 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
+ }
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
from .common import InfoExtractor
from ..utils import (
unified_strdate,
+ parse_duration,
+ qualities,
+ url_basename,
)
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://www\.npo\.nl/[^/]+/[^/]+/(?P<id>[^/?]+)'
- _TEST = {
- 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
- 'md5': '4b3f9c429157ec4775f2c9cb7b911016',
- 'info_dict': {
- 'id': 'VPWON_1220719',
- 'ext': 'mp4',
- 'title': 'Nieuwsuur',
- 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
- 'upload_date': '20140622',
+ _TESTS = [
+ {
+ 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
+ 'md5': '4b3f9c429157ec4775f2c9cb7b911016',
+ 'info_dict': {
+ 'id': 'VPWON_1220719',
+ 'ext': 'm4v',
+ 'title': 'Nieuwsuur',
+ 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
+ 'upload_date': '20140622',
+ },
},
- }
+ {
+ 'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
+ 'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
+ 'info_dict': {
+ 'id': 'VARA_101191800',
+ 'ext': 'm4v',
+ 'title': 'De Mega Mike & Mega Thomas show',
+ 'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
+ 'upload_date': '20090227',
+ 'duration': 2400,
+ },
+ },
+ {
+ 'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
+ 'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
+ 'info_dict': {
+ 'id': 'VPWON_1169289',
+ 'ext': 'm4v',
+ 'title': 'Tegenlicht',
+ 'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
+ 'upload_date': '20130225',
+ 'duration': 3000,
+ },
+ }
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
+ return self._get_info(video_id)
+ def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
video_id,
note='Downloading token'
)
- token = self._search_regex(r'npoplayer.token = "(.+?)"', token_page, 'token')
- streams_info = self._download_json(
- 'http://ida.omroep.nl/odi/?prid=%s&puboptions=h264_std&adaptive=yes&token=%s' % (video_id, token),
- video_id
- )
+ token = self._search_regex(r'npoplayer\.token = "(.+?)"', token_page, 'token')
- stream_info = self._download_json(
- streams_info['streams'][0] + '&type=json',
- video_id,
- 'Downloading stream info'
- )
+ formats = []
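+    # The format list passed to qualities() is ordered from lowest to highest preference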
+ quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
+ for format_id in metadata['pubopties']:
+ format_info = self._download_json(
+ 'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' % (video_id, format_id, token),
+ video_id, 'Downloading %s JSON' % format_id)
+ if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
+ continue
+ streams = format_info.get('streams')
+ if streams:
+ video_info = self._download_json(
+ streams[0] + '&type=json',
+ video_id, 'Downloading %s stream JSON' % format_id)
+ else:
+ video_info = format_info
+ video_url = video_info.get('url')
+ if not video_url:
+ continue
+ if format_id == 'adaptive':
+ formats.extend(self._extract_m3u8_formats(video_url, video_id))
+ else:
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ 'quality': quality(format_id),
+ })
+ self._sort_formats(formats)
return {
'id': video_id,
'title': metadata['titel'],
- 'ext': 'mp4',
- 'url': stream_info['url'],
'description': metadata['info'],
- 'thumbnail': metadata['images'][-1]['url'],
- 'upload_date': unified_strdate(metadata['gidsdatum']),
+ 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
+ 'upload_date': unified_strdate(metadata.get('gidsdatum')),
+ 'duration': parse_duration(metadata.get('tijdsduur')),
+ 'formats': formats,
}
+
+
+class TegenlichtVproIE(NPOIE):
+ IE_NAME = 'tegenlicht.vpro.nl'
+ _VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
+
+ _TESTS = [
+ {
+ 'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
+ 'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
+ 'info_dict': {
+ 'id': 'VPWON_1169289',
+ 'ext': 'm4v',
+ 'title': 'Tegenlicht',
+ 'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
+ 'upload_date': '20130225',
+ },
+ },
+ ]
+
+ def _real_extract(self, url):
+ name = url_basename(url)
+ webpage = self._download_webpage(url, name)
+ urn = self._html_search_meta('mediaurn', webpage)
+ info_page = self._download_json(
+ 'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
+ return self._get_info(info_page['mid'])
webpage = self._download_webpage(
request, video_id, 'Downloading %s page' % format_id)
video_url = self._html_search_regex(
- r'<a href="([^"]+)"\s*>Continue to watch video', webpage, '%s video URL' % format_id, fatal=False)
+ r'<a\s+href="([^"]+)"\s+class="b_link">', webpage, '%s video URL' % format_id, fatal=False)
if not video_url:
continue
formats.append({
webpage = self._download_webpage(
'http://m.nuvid.com/video/%s' % video_id, video_id, 'Downloading video page')
title = self._html_search_regex(
- r'<div class="title">\s+<h2[^>]*>([^<]+)</h2>', webpage, 'title').strip()
- thumbnail = self._html_search_regex(
- r'href="(/thumbs/[^"]+)"[^>]*data-link_type="thumbs"',
- webpage, 'thumbnail URL', fatal=False)
+ [r'<span title="([^"]+)">',
+ r'<div class="thumb-holder video">\s*<h5[^>]*>([^<]+)</h5>'], webpage, 'title').strip()
+ thumbnails = [
+ {
+ 'url': thumb_url,
+ } for thumb_url in re.findall(r'<img src="([^"]+)" alt="" />', webpage)
+ ]
+ thumbnail = thumbnails[0]['url'] if thumbnails else None
duration = parse_duration(self._html_search_regex(
- r'Length:\s*<span>(\d{2}:\d{2})</span>',webpage, 'duration', fatal=False))
+ r'<i class="fa fa-clock-o"></i>\s*(\d{2}:\d{2})', webpage, 'duration', fatal=False))
upload_date = unified_strdate(self._html_search_regex(
- r'Added:\s*<span>(\d{4}-\d{2}-\d{2})</span>', webpage, 'upload date', fatal=False))
+ r'<i class="fa fa-user"></i>\s*(\d{4}-\d{2}-\d{2})', webpage, 'upload date', fatal=False))
return {
'id': video_id,
'title': title,
- 'thumbnail': 'http://m.nuvid.com%s' % thumbnail,
+ 'thumbnails': thumbnails,
+ 'thumbnail': thumbnail,
'duration': duration,
'upload_date': upload_date,
'age_limit': 18,
+++ /dev/null
-# coding: utf-8
-from __future__ import unicode_literals
-
-import calendar
-import datetime
-import re
-
-from .common import InfoExtractor
-
-# audios on oe1.orf.at are only available for 7 days, so we can't
-# add tests.
-
-
-class OE1IE(InfoExtractor):
- IE_DESC = 'oe1.orf.at'
- _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)'
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- show_id = mobj.group('id')
-
- data = self._download_json(
- 'http://oe1.orf.at/programm/%s/konsole' % show_id,
- show_id
- )
-
- timestamp = datetime.datetime.strptime('%s %s' % (
- data['item']['day_label'],
- data['item']['time']
- ), '%d.%m.%Y %H:%M')
- unix_timestamp = calendar.timegm(timestamp.utctimetuple())
-
- return {
- 'id': show_id,
- 'title': data['item']['title'],
- 'url': data['item']['url_stream'],
- 'ext': 'mp3',
- 'description': data['item'].get('info'),
- 'timestamp': unix_timestamp
- }
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class OktoberfestTVIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
+
+ _TEST = {
+ 'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
+ 'info_dict': {
+ 'id': 'hb-zelt',
+ 'ext': 'mp4',
+ 'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._live_title(self._html_search_regex(
+ r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
+
+ clip = self._search_regex(
+ r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
+ ncurl = self._search_regex(
+ r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
+ video_url = ncurl + clip
+ thumbnail = self._search_regex(
+ r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
+ 'thumbnail', fatal=False)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'is_live': True,
+ 'thumbnail': thumbnail,
+ }
import json
from .common import InfoExtractor
-from ..utils import unescapeHTML
+from ..utils import (
+ unescapeHTML,
+ ExtractorError,
+)
class OoyalaIE(InfoExtractor):
_VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
- _TEST = {
- # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
- 'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
- 'md5': '3f5cceb3a7bf461d6c29dc466cf8033c',
- 'info_dict': {
- 'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
- 'ext': 'mp4',
- 'title': 'Explaining Data Recovery from Hard Drives and SSDs',
- 'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+ _TESTS = [
+ {
+ # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
+ 'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
+ 'md5': '3f5cceb3a7bf461d6c29dc466cf8033c',
+ 'info_dict': {
+ 'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
+ 'ext': 'mp4',
+ 'title': 'Explaining Data Recovery from Hard Drives and SSDs',
+ 'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+ },
+ }, {
+ # Only available for ipad
+ 'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
+ 'md5': '4b9754921fddb68106e48c142e2a01e6',
+ 'info_dict': {
+ 'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
+ 'ext': 'mp4',
+ 'title': 'Simulation Overview - Levels of Simulation',
+ 'description': '',
+ },
},
- }
+ ]
@staticmethod
def _url_for_embed_code(embed_code):
player = self._download_webpage(player_url, embedCode)
mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
player, 'mobile player url')
- mobile_player = self._download_webpage(mobile_url, embedCode)
- videos_info = self._search_regex(
- r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
- mobile_player, 'info').replace('\\"','"')
- videos_more_info = self._search_regex(r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"','"')
+ # Looks like some videos are only available for particular devices
+ # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0
+ # is only available for ipad)
+        # Work around this by fetching the URLs for every device found,
+        # starting with 'unknown', until one succeeds or all of them fail.
+ devices = re.findall(r'device\s*=\s*"([^"]+)";', player)
+ devices.remove('unknown')
+ devices.insert(0, 'unknown')
+ for device in devices:
+ mobile_player = self._download_webpage(
+ '%s&device=%s' % (mobile_url, device), embedCode,
+ 'Downloading mobile player JS for %s device' % device)
+ videos_info = self._search_regex(
+ r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
+ mobile_player, 'info', fatal=False, default=None)
+ if videos_info:
+ break
+ if not videos_info:
+ raise ExtractorError('Unable to extract info')
+ videos_info = videos_info.replace('\\"', '"')
+ videos_more_info = self._search_regex(
+ r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"')
videos_info = json.loads(videos_info)
- videos_more_info =json.loads(videos_more_info)
+ videos_more_info = json.loads(videos_more_info)
if videos_more_info.get('lineup'):
videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])]
import json
import re
+import calendar
+import datetime
from .common import InfoExtractor
from ..utils import (
)
-class ORFIE(InfoExtractor):
+class ORFTVthekIE(InfoExtractor):
+ IE_NAME = 'orf:tvthek'
+ IE_DESC = 'ORF TVthek'
_VALID_URL = r'https?://tvthek\.orf\.at/(?:programs/.+?/episodes|topics/.+?|program/[^/]+)/(?P<id>\d+)'
_TEST = {
'entries': entries,
'id': playlist_id,
}
+
+
+# Audio files on ORF radio are only available for 7 days, so we can't add tests.
+
+
+class ORFOE1IE(InfoExtractor):
+ IE_NAME = 'orf:oe1'
+ IE_DESC = 'Radio Österreich 1'
+ _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ show_id = mobj.group('id')
+
+ data = self._download_json(
+ 'http://oe1.orf.at/programm/%s/konsole' % show_id,
+ show_id
+ )
+
+ timestamp = datetime.datetime.strptime('%s %s' % (
+ data['item']['day_label'],
+ data['item']['time']
+ ), '%d.%m.%Y %H:%M')
+ unix_timestamp = calendar.timegm(timestamp.utctimetuple())
+
+ return {
+ 'id': show_id,
+ 'title': data['item']['title'],
+ 'url': data['item']['url_stream'],
+ 'ext': 'mp3',
+ 'description': data['item'].get('info'),
+ 'timestamp': unix_timestamp
+ }
+
+
+class ORFFM4IE(InfoExtractor):
+    IE_NAME = 'orf:fm4'
+    IE_DESC = 'radio FM4'
+ _VALID_URL = r'http://fm4\.orf\.at/7tage/?#(?P<date>[0-9]+)/(?P<show>\w+)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ show_date = mobj.group('date')
+ show_id = mobj.group('show')
+
+ data = self._download_json(
+ 'http://audioapi.orf.at/fm4/json/2.0/broadcasts/%s/4%s' % (show_date, show_id),
+ show_id
+ )
+
+ def extract_entry_dict(info, title, subtitle):
+ return {
+ 'id': info['loopStreamId'].replace('.mp3', ''),
+ 'url': 'http://loopstream01.apa.at/?channel=fm4&id=%s' % info['loopStreamId'],
+ 'title': title,
+ 'description': subtitle,
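+            # 'start' and 'end' appear to be millisecond timestamps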
+ 'duration': (info['end'] - info['start']) / 1000,
+ 'timestamp': info['start'] / 1000,
+ 'ext': 'mp3'
+ }
+
+ entries = [extract_entry_dict(t, data['title'], data['subtitle']) for t in data['streams']]
+
+ return {
+ '_type': 'playlist',
+ 'id': show_id,
+ 'title': data['title'],
+ 'description': data['subtitle'],
+ 'entries': entries
+ }
\ No newline at end of file
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ js_to_json,
+)
+
+
+class PatreonIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(.+)'
+ _TESTS = [
+ {
+ 'url': 'http://www.patreon.com/creation?hid=743933',
+ 'md5': 'e25505eec1053a6e6813b8ed369875cc',
+ 'info_dict': {
+ 'id': '743933',
+ 'ext': 'mp3',
+ 'title': 'Episode 166: David Smalley of Dogma Debate',
+ 'uploader': 'Cognitive Dissonance Podcast',
+ 'thumbnail': 're:^https?://.*$',
+ },
+ },
+ {
+ 'url': 'http://www.patreon.com/creation?hid=754133',
+ 'md5': '3eb09345bf44bf60451b8b0b81759d0a',
+ 'info_dict': {
+ 'id': '754133',
+ 'ext': 'mp3',
+ 'title': 'CD 167 Extra',
+ 'uploader': 'Cognitive Dissonance Podcast',
+ 'thumbnail': 're:^https?://.*$',
+ },
+ },
+ ]
+
+    # Currently Patreon exposes the download URL via hidden CSS, so login is not
+    # needed. Keeping this commented out for when this inevitably changes.
+ '''
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+
+ login_form = {
+ 'redirectUrl': 'http://www.patreon.com/',
+ 'email': username,
+ 'password': password,
+ }
+
+ request = compat_urllib_request.Request(
+ 'https://www.patreon.com/processLogin',
+ compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ )
+ login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
+
+ if re.search(r'onLoginFailed', login_page):
+ raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)
+
+ def _real_initialize(self):
+ self._login()
+ '''
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group(1)
+
+ webpage = self._download_webpage(url, video_id)
+ title = self._og_search_title(webpage).strip()
+
+ attach_fn = self._html_search_regex(
+ r'<div class="attach"><a target="_blank" href="([^"]+)">',
+ webpage, 'attachment URL', default=None)
+ if attach_fn is not None:
+ video_url = 'http://www.patreon.com' + attach_fn
+ thumbnail = self._og_search_thumbnail(webpage)
+ uploader = self._html_search_regex(
+ r'<strong>(.*?)</strong> is creating', webpage, 'uploader')
+ else:
+ playlist_js = self._search_regex(
+ r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])',
+ webpage, 'playlist JSON')
+ playlist_json = js_to_json(playlist_js)
+ playlist = json.loads(playlist_json)
+ data = playlist[0]
+ video_url = self._proto_relative_url(data['mp3'])
+ thumbnail = self._proto_relative_url(data.get('cover'))
+ uploader = data.get('artist')
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp3',
+ 'title': title,
+ 'uploader': uploader,
+ 'thumbnail': thumbnail,
+ }
from .common import InfoExtractor
from ..utils import (
+ unified_strdate,
US_RATINGS,
)
class PBSIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://
(?:
- # Direct video URL
- video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
- # Article with embedded player
- (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+)/?(?:$|[?\#]) |
+ # Direct video URL
+ video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
+ # Article with embedded player (or direct video)
+ (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
'''
- _TEST = {
- 'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
- 'md5': 'ce1888486f0908d555a8093cac9a7362',
- 'info_dict': {
- 'id': '2365006249',
- 'ext': 'mp4',
- 'title': 'A More Perfect Union',
- 'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
- 'duration': 3190,
+ _TESTS = [
+ {
+ 'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/',
+ 'md5': 'ce1888486f0908d555a8093cac9a7362',
+ 'info_dict': {
+ 'id': '2365006249',
+ 'ext': 'mp4',
+ 'title': 'A More Perfect Union',
+ 'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
+ 'duration': 3190,
+ },
+ },
+ {
+ 'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/',
+ 'md5': '143c98aa54a346738a3d78f54c925321',
+ 'info_dict': {
+ 'id': '2365297690',
+ 'ext': 'mp4',
+ 'title': 'Losing Iraq',
+ 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+ 'duration': 5050,
+ },
+ },
+ {
+ 'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/',
+ 'md5': 'b19856d7f5351b17a5ab1dc6a64be633',
+ 'info_dict': {
+ 'id': '2201174722',
+ 'ext': 'mp4',
+ 'title': 'Cyber Schools Gain Popularity, but Quality Questions Persist',
+ 'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
+ 'duration': 801,
+ },
+ },
+ {
+ 'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/',
+ 'md5': 'c62859342be2a0358d6c9eb306595978',
+ 'info_dict': {
+ 'id': '2365297708',
+ 'ext': 'mp4',
+ 'description': 'md5:68d87ef760660eb564455eb30ca464fe',
+ 'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
+ 'duration': 6559,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ },
+ {
+ 'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
+ 'md5': '908f3e5473a693b266b84e25e1cf9703',
+ 'info_dict': {
+ 'id': '2365160389',
+ 'display_id': 'killer-typhoon',
+ 'ext': 'mp4',
+ 'description': 'md5:c741d14e979fc53228c575894094f157',
+ 'title': 'Killer Typhoon',
+ 'duration': 3172,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'upload_date': '20140122',
+ }
},
- }
+ {
+ 'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/',
+ 'info_dict': {
+ 'id': 'united-states-of-secrets',
+ },
+ 'playlist_count': 2,
+ }
+ ]
- def _extract_ids(self, url):
+ def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id')
if presumptive_id:
webpage = self._download_webpage(url, display_id)
- # frontline video embed
+ upload_date = unified_strdate(self._search_regex(
+ r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
+ webpage, 'upload date', default=None))
+
+ # tabbed frontline videos
+ tabbed_videos = re.findall(
+ r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
+ if tabbed_videos:
+ return tabbed_videos, presumptive_id, upload_date
+
+ MEDIA_ID_REGEXES = [
+ r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
+ r'class="coveplayerid">([^<]+)<', # coveplayer
+ r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
+ ]
+
media_id = self._search_regex(
- r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'",
- webpage, 'frontline video ID', fatal=False, default=None)
+ MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
if media_id:
- return media_id, presumptive_id
+ return media_id, presumptive_id, upload_date
url = self._search_regex(
- r'<iframe\s+id=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
+ r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
webpage, 'player URL')
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = video_id
- return video_id, display_id
+ return video_id, display_id, None
def _real_extract(self, url):
- video_id, display_id = self._extract_ids(url)
+ video_id, display_id, upload_date = self._extract_webpage(url)
+
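+        # For tabbed pages _extract_webpage returns a list of video IDs; expose them as a playlist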
+ if isinstance(video_id, list):
+ entries = [self.url_result(
+ 'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id)
+ for vid_id in video_id]
+ return self.playlist_result(entries, display_id)
info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
info = self._download_json(info_url, display_id)
return {
'id': video_id,
+ 'display_id': display_id,
'title': info['title'],
'url': info['alternate_encoding']['url'],
'ext': 'mp4',
'thumbnail': info.get('image_url'),
'duration': info.get('duration'),
'age_limit': age_limit,
+ 'upload_date': upload_date,
}
--- /dev/null
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .zdf import extract_from_xml_url
+
+
+class PhoenixIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?phoenix\.de/content/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.phoenix.de/content/884301',
+ 'md5': 'ed249f045256150c92e72dbb70eadec6',
+ 'info_dict': {
+ 'id': '884301',
+ 'ext': 'mp4',
+ 'title': 'Michael Krons mit Hans-Werner Sinn',
+ 'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
+ 'upload_date': '20141025',
+ 'uploader': 'Im Dialog',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ internal_id = self._search_regex(
+ r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
+ webpage, 'internal video ID')
+
+ api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
+ return extract_from_xml_url(self, video_id, api_url)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class PlanetaPlayIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)'
+ _API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}'
+ _THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}'
+ _TEST = {
+ 'url': 'http://planetaplay.com/?sng=3586',
+ 'md5': '9d569dceb7251a4e01355d5aea60f9db',
+ 'info_dict': {
+ 'id': '3586',
+ 'ext': 'flv',
+ 'title': 'md5:e829428ee28b1deed00de90de49d1da1',
+ }
+ }
+
+ _SONG_FORMATS = {
+ 'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'),
+ 'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'),
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ response = self._download_json(
+ self._API_URL.format(video_id), video_id)['response']
+ try:
+ data = response.get('data')[0]
+ except IndexError:
+ raise ExtractorError(
+ '%s: failed to get the playlist' % self.IE_NAME, expected=True)
+
+ title = '{song_artists:} - {sng_name:}'.format(**data)
+ thumbnail = self._THUMBNAIL_URL.format(**data)
+
+ formats = []
+ for format_id, (quality, url_template) in self._SONG_FORMATS.items():
+ formats.append({
+ 'format_id': format_id,
+ 'url': url_template.format(**data),
+ 'quality': quality,
+ 'ext': 'flv',
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import os.path
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+
+
+class PlayedIE(InfoExtractor):
+ IE_NAME = 'played.to'
+ _VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)'
+
+ _TEST = {
+ 'url': 'http://played.to/j2f2sfiiukgt',
+ 'md5': 'c2bd75a368e82980e7257bf500c00637',
+ 'info_dict': {
+ 'id': 'j2f2sfiiukgt',
+ 'ext': 'flv',
+ 'title': 'youtube-dl_test_video.mp4',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ orig_webpage = self._download_webpage(url, video_id)
+ fields = re.findall(
+ r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
+ data = dict(fields)
+
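+        # The site seems to require a short pause before the form is posted back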
+ self._sleep(2, video_id)
+
+ post = compat_urllib_parse.urlencode(data)
+ headers = {
+ b'Content-Type': b'application/x-www-form-urlencoded',
+ }
+ req = compat_urllib_request.Request(url, post, headers)
+ webpage = self._download_webpage(
+ req, video_id, note='Downloading video page ...')
+
+ title = os.path.splitext(data['fname'])[0]
+
+ video_url = self._search_regex(
+ r'file: "?(.+?)",', webpage, 'video URL')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urllib_request,
+ ExtractorError,
+ float_or_none,
+ int_or_none,
+ str_to_int,
+)
+
+
+class PlayFMIE(InfoExtractor):
+ IE_NAME = 'play.fm'
+ _VALID_URL = r'https?://(?:www\.)?play\.fm/[^?#]*(?P<upload_date>[0-9]{8})(?P<id>[0-9]{6})(?:$|[?#])'
+
+ _TEST = {
+ 'url': 'http://www.play.fm/recording/leipzigelectronicmusicbatofarparis_fr20140712137220',
+ 'md5': 'c505f8307825a245d0c7ad1850001f22',
+ 'info_dict': {
+ 'id': '137220',
+ 'ext': 'mp3',
+ 'title': 'LEIPZIG ELECTRONIC MUSIC @ Batofar (Paris,FR) - 2014-07-12',
+ 'uploader': 'Sven Tasnadi',
+ 'uploader_id': 'sventasnadi',
+ 'duration': 5627.428,
+ 'upload_date': '20140712',
+ 'view_count': int,
+ 'comment_count': int,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ upload_date = mobj.group('upload_date')
+
+ rec_data = compat_urllib_parse.urlencode({'rec_id': video_id})
+ req = compat_urllib_request.Request(
+ 'http://www.play.fm/flexRead/recording', data=rec_data)
+ req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ rec_doc = self._download_xml(req, video_id)
+
+ error_node = rec_doc.find('./error')
+ if error_node is not None:
+            raise ExtractorError('An error occurred: %s (code %s)' % (
+ error_node.text, rec_doc.find('./status').text))
+
+ recording = rec_doc.find('./recording')
+ title = recording.find('./title').text
+ view_count = str_to_int(recording.find('./stats/playcount').text)
+ comment_count = str_to_int(recording.find('./stats/comments').text)
+ duration = float_or_none(recording.find('./duration').text, scale=1000)
+ thumbnail = recording.find('./image').text
+
+ artist = recording.find('./artists/artist')
+ uploader = artist.find('./name').text
+ uploader_id = artist.find('./slug').text
+
+ video_url = '%s//%s/%s/%s/offset/0/sh/%s/rec/%s/jingle/%s/loc/%s' % (
+ 'http:', recording.find('./url').text,
+ recording.find('./_class').text, recording.find('./file_id').text,
+ rec_doc.find('./uuid').text, video_id,
+ rec_doc.find('./jingle/file_id').text,
+ 'http%3A%2F%2Fwww.play.fm%2Fplayer',
+ )
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp3',
+ 'filesize': int_or_none(recording.find('./size').text),
+ 'title': title,
+ 'upload_date': upload_date,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'duration': duration,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ }
import json
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ js_to_json,
+ qualities,
+ determine_ext,
+)
class PornHdIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)'
+ _VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
_TEST = {
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'md5': '956b8ca569f7f4d8ec563e2c41598441',
'info_dict': {
'id': '1962',
+ 'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
'ext': 'mp4',
'title': 'Sierra loves doing laundry',
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'view_count': int,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, video_id)
-
- title = self._og_search_title(webpage)
- TITLE_SUFFIX = ' porn HD Video | PornHD.com '
- if title.endswith(TITLE_SUFFIX):
- title = title[:-len(TITLE_SUFFIX)]
+ webpage = self._download_webpage(url, display_id or video_id)
+ title = self._html_search_regex(
+ r'<title>(.+) porn HD.+?</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
view_count = int_or_none(self._html_search_regex(
- r'(\d+) views </span>', webpage, 'view count', fatal=False))
-
- formats = [
- {
- 'url': format_url,
- 'ext': format.lower(),
- 'format_id': '%s-%s' % (format.lower(), quality.lower()),
- 'quality': 1 if quality.lower() == 'high' else 0,
- } for format, quality, format_url in re.findall(
- r'var __video([\da-zA-Z]+?)(Low|High)StreamUrl = \'(http://.+?)\?noProxy=1\'', webpage)
- ]
-
- mobj = re.search(r'flashVars = (?P<flashvars>{.+?});', webpage)
- if mobj:
- flashvars = json.loads(mobj.group('flashvars'))
- formats.extend([
- {
- 'url': flashvars['hashlink'].replace('?noProxy=1', ''),
- 'ext': 'flv',
- 'format_id': 'flv-low',
- 'quality': 0,
- },
- {
- 'url': flashvars['hd'].replace('?noProxy=1', ''),
- 'ext': 'flv',
- 'format_id': 'flv-high',
- 'quality': 1,
- }
- ])
- thumbnail = flashvars['urlWallpaper']
- else:
- thumbnail = self._og_search_thumbnail(webpage)
+ r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
+ thumbnail = self._search_regex(
+ r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
+ quality = qualities(['SD', 'HD'])
+ formats = [{
+ 'url': source['file'],
+ 'format_id': '%s-%s' % (source['label'], determine_ext(source['file'])),
+ 'quality': quality(source['label']),
+ } for source in json.loads(js_to_json(self._search_regex(
+ r"(?s)'sources'\s*:\s*(\[.+?\])", webpage, 'sources')))]
self._sort_formats(formats)
return {
'id': video_id,
+ 'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
class PornHubIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
+ _VALID_URL = r'^https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
_TEST = {
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
- 'file': '648719015.mp4',
'md5': '882f488fa1f0026f023f33576004a2ed',
'info_dict': {
- "uploader": "BABES-COM",
+ 'id': '648719015',
+ 'ext': 'mp4',
+ "uploader": "Babes",
"title": "Seductive Indian beauty strips down and fingers her pink pussy",
"age_limit": 18
}
return count
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
- url = 'http://www.' + mobj.group('url')
+ video_id = self._match_id(url)
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
video_uploader = self._html_search_regex(
- r'(?s)From: .+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
+ r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False)
thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
if thumbnail:
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
class PornotubeIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+ _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
_TEST = {
- u'url': u'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
- u'file': u'1689755.flv',
- u'md5': u'374dd6dcedd24234453b295209aa69b6',
- u'info_dict': {
- u"upload_date": u"20090708",
- u"title": u"Marilyn-Monroe-Bathing",
- u"age_limit": 18
+ 'url': 'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
+ 'md5': '374dd6dcedd24234453b295209aa69b6',
+ 'info_dict': {
+ 'id': '1689755',
+ 'ext': 'flv',
+ 'upload_date': '20090708',
+ 'title': 'Marilyn-Monroe-Bathing',
+ 'age_limit': 18
}
}
# Get the video URL
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
- video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
+ video_url = self._search_regex(VIDEO_URL_RE, webpage, 'video url')
video_url = compat_urllib_parse.unquote(video_url)
#Get the uploaded date
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
- upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
- if upload_date: upload_date = unified_strdate(upload_date)
+ upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, 'upload date', fatal=False)
+ if upload_date:
+ upload_date = unified_strdate(upload_date)
age_limit = self._rta_search(webpage)
- info = {'id': video_id,
- 'url': video_url,
- 'uploader': None,
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': 'flv',
- 'format': 'flv',
- 'age_limit': age_limit}
-
- return [info]
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'upload_date': upload_date,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'format': 'flv',
+ 'age_limit': age_limit,
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ str_to_int,
+)
+
+
+class PornoXOIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html'
+ _TEST = {
+ 'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html',
+ 'md5': '582f28ecbaa9e6e24cb90f50f524ce87',
+ 'info_dict': {
+ 'id': '7564',
+ 'ext': 'flv',
+ 'title': 'Striptease From Sexy Secretary!',
+ 'description': 'Striptease From Sexy Secretary!',
+ 'categories': list, # NSFW
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_url = self._html_search_regex(
+ r'\'file\'\s*:\s*"([^"]+)"', webpage, 'video_url')
+
+ title = self._html_search_regex(
+ r'<title>([^<]+)\s*-\s*PornoXO', webpage, 'title')
+
+ description = self._html_search_regex(
+ r'<meta name="description" content="([^"]+)\s*featuring',
+ webpage, 'description', fatal=False)
+
+ thumbnail = self._html_search_regex(
+ r'\'image\'\s*:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+ view_count = str_to_int(self._html_search_regex(
+ r'[vV]iews:\s*([0-9,]+)', webpage, 'view count', fatal=False))
+
+ categories_str = self._html_search_regex(
+ r'<meta name="description" content=".*featuring\s*([^"]+)"',
+ webpage, 'categories', fatal=False)
+ categories = (
+ None if categories_str is None
+ else categories_str.split(','))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'view_count': view_count,
+ 'age_limit': 18,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ determine_ext,
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+
+
+class PromptFileIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
+ _TEST = {
+ 'url': 'http://www.promptfile.com/l/D21B4746E9-F01462F0FF',
+ 'md5': 'd1451b6302da7215485837aaea882c4c',
+ 'info_dict': {
+ 'id': 'D21B4746E9-F01462F0FF',
+ 'ext': 'mp4',
+ 'title': 'Birds.mp4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
+ raise ExtractorError('Video %s does not exist' % video_id,
+ expected=True)
+
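+        # The landing page contains a hidden form that must be re-submitted to reach the page with the actual video URL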
+ fields = dict(re.findall(r'''(?x)type="hidden"\s+
+ name="(.+?)"\s+
+ value="(.*?)"
+ ''', webpage))
+ post = compat_urllib_parse.urlencode(fields)
+ req = compat_urllib_request.Request(url, post)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+ webpage = self._download_webpage(
+ req, video_id, 'Downloading video page')
+
+ url = self._html_search_regex(r'url:\s*\'([^\']+)\'', webpage, 'URL')
+ title = self._html_search_regex(
+ r'<span.+title="([^"]+)">', webpage, 'title')
+ thumbnail = self._html_search_regex(
+ r'<div id="player_overlay">.*button>.*?<img src="([^"]+)"',
+ webpage, 'thumbnail', fatal=False, flags=re.DOTALL)
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ 'ext': determine_ext(title),
+ }]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
- 'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
- 'upload_date': '20130206',
+ 'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
- page = self._download_webpage(url, video_id, 'Downloading page')
-
- clip_id = self._html_search_regex(self._CLIPID_REGEXES, page, 'clip id')
+ clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
access_token = 'testclient'
client_name = 'kolibri-1.2.5'
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
- title = self._html_search_regex(self._TITLE_REGEXES, page, 'title')
- description = self._html_search_regex(self._DESCRIPTION_REGEXES, page, 'description', fatal=False)
- thumbnail = self._og_search_thumbnail(page)
+ title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
+ description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_regex(
- self._UPLOAD_DATE_REGEXES, page, 'upload date', fatal=False))
+ self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
formats = []
urls_sources = urls_sources.values()
def fix_bitrate(bitrate):
- return bitrate / 1000 if bitrate % 1000 == 0 else bitrate
+ return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
for source in urls_sources:
protocol = source['protocol']
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urlparse,
+ determine_ext,
+ int_or_none,
+)
+
+
+class QuickVidIE(InfoExtractor):
+ _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
+ _TEST = {
+ 'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
+ 'md5': 'c0c72dd473f260c06c808a05d19acdc5',
+ 'info_dict': {
+ 'id': 'sUQT3RCG8dx',
+ 'ext': 'mp4',
+ 'title': 'Nick Offerman\'s Summer Reading Recap',
+ 'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
+ 'view_count': int,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
+ view_count = int_or_none(self._html_search_regex(
+ r'(?s)<div id="views">(.*?)</div>',
+ webpage, 'view count', fatal=False))
+ video_code = self._search_regex(
+ r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
+ formats = [
+ {
+ 'url': compat_urlparse.urljoin(url, src),
+ 'format_id': determine_ext(src, None),
+ } for src in re.findall('<source\s+src="([^"]+)"', video_code)
+ ]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'view_count': view_count,
+ }
from __future__ import unicode_literals
import re
-import time
from .common import InfoExtractor
-from ..utils import strip_jsonp
+from ..utils import str_or_none
class ReverbNationIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
_TESTS = [{
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
- 'file': '16965047.mp3',
'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
'info_dict': {
+ "id": "16965047",
+ "ext": "mp3",
"title": "MONA LISA",
"uploader": "ALKILADOS",
- "uploader_id": 216429,
- "thumbnail": "//gp1.wac.edgecastcdn.net/802892/production_public/Photo/13761700/image/1366002176_AVATAR_MONA_LISA.jpg"
+ "uploader_id": "216429",
+ "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
},
}]
song_id = mobj.group('id')
api_res = self._download_json(
- 'https://api.reverbnation.com/song/%s?callback=api_response_5&_=%d'
- % (song_id, int(time.time() * 1000)),
+ 'https://api.reverbnation.com/song/%s' % song_id,
song_id,
- transform_source=strip_jsonp,
note='Downloading information of song %s' % song_id
)
'title': api_res.get('name'),
'url': api_res.get('url'),
'uploader': api_res.get('artist', {}).get('name'),
- 'uploader_id': api_res.get('artist', {}).get('id'),
- 'thumbnail': api_res.get('image', api_res.get('thumbnail')),
+ 'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
+ 'thumbnail': self._proto_relative_url(
+ api_res.get('image', api_res.get('thumbnail'))),
'ext': 'mp3',
'vcodec': 'none',
}
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from ..utils import (
- clean_html,
- compat_parse_qs,
-)
+from ..utils import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
- _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<video_id>[^/]+)'
+ _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
- "url": "http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/",
- 'file': 'LYV6doKo7f.mp4',
+ 'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
- "title": "Luati-le Banii sez 4 ep 1",
- "description": "re:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$",
+ 'id': 'LYV6doKo7f',
+ 'ext': 'mp4',
+ 'title': 'Luati-le Banii sez 4 ep 1',
+ 'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('video_id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- flashVars_str = self._search_regex(
- r'<param name="flashVars" value="([^"]+)"',
- webpage, 'flashVars')
- flashVars = compat_parse_qs(flashVars_str)
+ url = compat_urllib_parse_unquote(self._search_regex(
+ r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ 'ext': 'mp4',
+ }]
return {
- '_type': 'video',
'id': video_id,
- 'ext': 'mp4',
- 'url': flashVars['videoURL'][0],
- 'title': flashVars['title'][0],
- 'description': clean_html(flashVars['desc'][0]),
- 'thumbnail': flashVars['preview'][0],
+ 'formats': formats,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
}
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import parse_duration
+
+
+class RtlXlIE(InfoExtractor):
+ IE_NAME = 'rtlxl.nl'
+ _VALID_URL = r'https?://www\.rtlxl\.nl/#!/[^/]+/(?P<uuid>[^/?]+)'
+
+ _TEST = {
+ 'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/6e4203a6-0a5e-3596-8424-c599a59e0677',
+ 'md5': 'cc16baa36a6c169391f0764fa6b16654',
+ 'info_dict': {
+ 'id': '6e4203a6-0a5e-3596-8424-c599a59e0677',
+ 'ext': 'mp4',
+ 'title': 'RTL Nieuws - Laat',
+ 'description': 'md5:6b61f66510c8889923b11f2778c72dc5',
+ 'timestamp': 1408051800,
+ 'upload_date': '20140814',
+ 'duration': 576.880,
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ uuid = mobj.group('uuid')
+
+ info = self._download_json(
+ 'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=flash/' % uuid,
+ uuid)
+
+ material = info['material'][0]
+ episode_info = info['episodes'][0]
+
+ progname = info['abstracts'][0]['name']
+ subtitle = material['title'] or info['episodes'][0]['name']
+
+ videopath = material['videopath']
+ f4m_url = 'http://manifest.us.rtl.nl' + videopath
+
+ formats = self._extract_f4m_formats(f4m_url, uuid)
+
+ video_urlpart = videopath.split('/flash/')[1][:-4]
+ PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
+
+ formats.extend([
+ {
+ 'url': PG_URL_TEMPLATE % ('a2m', video_urlpart),
+ 'format_id': 'pg-sd',
+ },
+ {
+ 'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
+ 'format_id': 'pg-hd',
+ }
+ ])
+
+ return {
+ 'id': uuid,
+ 'title': '%s - %s' % (progname, subtitle),
+ 'formats': formats,
+ 'timestamp': material['original_date'],
+ 'description': episode_info['synopsis'],
+ 'duration': parse_duration(material.get('duration')),
+ }
'id': '99205',
'ext': 'flv',
'title': 'Medicopter 117 - Angst!',
- 'description': 'md5:895b1df01639b5f61a04fc305a5cb94d',
+ 'description': 're:^Im Therapiezentrum \'Sonnalm\' kommen durch eine Unachtsamkeit die für die B.handlung mit Phobikern gehaltenen Voglespinnen frei\. Eine Ausreißerin',
'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
'upload_date': '20080928',
'duration': 2691,
# encoding: utf-8
from __future__ import unicode_literals
-import re
import base64
+import re
+import time
from .common import InfoExtractor
from ..utils import (
struct_unpack,
+ remove_end,
)
+def _decrypt_url(png):
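+    # The media URL is hidden in the tEXt chunk of a PNG: the chunk carries an
+    # alphabet, a '#' separator and pairs of digits indexing into that alphabet,
+    # all interleaved with a varying amount of padding characters.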
+ encrypted_data = base64.b64decode(png)
+ text_index = encrypted_data.find(b'tEXt')
+ text_chunk = encrypted_data[text_index - 4:]
+ length = struct_unpack('!I', text_chunk[:4])[0]
+ # Use bytearray to get integers when iterating in both python 2.x and 3.x
+ data = bytearray(text_chunk[8:8 + length])
+ data = [chr(b) for b in data if b != 0]
+ hash_index = data.index('#')
+ alphabet_data = data[:hash_index]
+ url_data = data[hash_index + 1:]
+
+ alphabet = []
+ e = 0
+ d = 0
+ for l in alphabet_data:
+ if d == 0:
+ alphabet.append(l)
+ d = e = (e + 1) % 4
+ else:
+ d -= 1
+ url = ''
+ f = 0
+ e = 3
+ b = 1
+ for letter in url_data:
+ if f == 0:
+ l = int(letter) * 10
+ f = 1
+ else:
+ if e == 0:
+ l += int(letter)
+ url += alphabet[l]
+ e = (b + 3) % 4
+ f = 0
+ b += 1
+ else:
+ e -= 1
+
+ return url
+
+
+
class RTVEALaCartaIE(InfoExtractor):
IE_NAME = 'rtve.es:alacarta'
IE_DESC = 'RTVE a la carta'
_VALID_URL = r'http://www\.rtve\.es/alacarta/videos/[^/]+/[^/]+/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
'info_dict': {
'ext': 'mp4',
'title': 'Balonmano - Swiss Cup masculina. Final: España-Suecia',
},
- }
-
- def _decrypt_url(self, png):
- encrypted_data = base64.b64decode(png)
- text_index = encrypted_data.find(b'tEXt')
- text_chunk = encrypted_data[text_index-4:]
- length = struct_unpack('!I', text_chunk[:4])[0]
- # Use bytearray to get integers when iterating in both python 2.x and 3.x
- data = bytearray(text_chunk[8:8+length])
- data = [chr(b) for b in data if b != 0]
- hash_index = data.index('#')
- alphabet_data = data[:hash_index]
- url_data = data[hash_index+1:]
-
- alphabet = []
- e = 0
- d = 0
- for l in alphabet_data:
- if d == 0:
- alphabet.append(l)
- d = e = (e + 1) % 4
- else:
- d -= 1
- url = ''
- f = 0
- e = 3
- b = 1
- for letter in url_data:
- if f == 0:
- l = int(letter)*10
- f = 1
- else:
- if e == 0:
- l += int(letter)
- url += alphabet[l]
- e = (b + 3) % 4
- f = 0
- b += 1
- else:
- e -= 1
-
- return url
+ }, {
+ 'note': 'Live stream',
+ 'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/',
+ 'info_dict': {
+ 'id': '1694255',
+ 'ext': 'flv',
+ 'title': 'TODO',
+ }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id)['page']['items'][0]
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % video_id
png = self._download_webpage(png_url, video_id, 'Downloading url information')
- video_url = self._decrypt_url(png)
+ video_url = _decrypt_url(png)
return {
'id': video_id,
'title': info['title'],
'url': video_url,
- 'thumbnail': info['image'],
+ 'thumbnail': info.get('image'),
+ 'page_url': url,
+ }
+
+
+class RTVELiveIE(InfoExtractor):
+ IE_NAME = 'rtve.es:live'
+ IE_DESC = 'RTVE.es live streams'
+ _VALID_URL = r'http://www\.rtve\.es/(?:deportes/directo|noticias|television)/(?P<id>[a-zA-Z0-9-]+)'
+
+ _TESTS = [{
+ 'url': 'http://www.rtve.es/noticias/directo-la-1/',
+ 'info_dict': {
+ 'id': 'directo-la-1',
+ 'ext': 'flv',
+ 'title': 're:^La 1 de TVE [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$',
+ },
+ 'params': {
+ 'skip_download': 'live stream',
+ }
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ start_time = time.gmtime()
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ player_url = self._search_regex(
+ r'<param name="movie" value="([^"]+)"/>', webpage, 'player URL')
+ title = remove_end(self._og_search_title(webpage), ' en directo')
+ title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time)
+
+ vidplayer_id = self._search_regex(
+ r' id="vidplayer([0-9]+)"', webpage, 'internal video ID')
+ png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id
+ png = self._download_webpage(png_url, video_id, 'Downloading url information')
+ video_url = _decrypt_url(png)
+
+ return {
+ 'id': video_id,
+ 'ext': 'flv',
+ 'title': title,
+ 'url': video_url,
+ 'app': 'rtve-live-live?ovpfv=2.1.2',
+ 'player_url': player_url,
+ 'rtmp_live': True,
}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'<param name="src" value="([^"]+)"', webpage, 'video url')
title = self._html_search_regex(
- r'<title>([^<]+) RUHD.ru - Видео Высокого качества №1 в России!</title>', webpage, 'title')
+ r'<title>([^<]+) RUHD.ru - Видео Высокого качества №1 в России!</title>',
+ webpage, 'title')
description = self._html_search_regex(
- r'(?s)<div id="longdesc">(.+?)<span id="showlink">', webpage, 'description', fatal=False)
+ r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
+ webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
- r'<param name="previewImage" value="([^"]+)"', webpage, 'thumbnail', fatal=False)
+ r'<param name="previewImage" value="([^"]+)"',
+ webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = 'http://www.ruhd.ru' + thumbnail
IE_NAME = 'rutube:channel'
IE_DESC = 'Rutube channels'
_VALID_URL = r'http://rutube\.ru/tags/video/(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'http://rutube.ru/tags/video/1800/',
+ 'info_dict': {
+ 'id': '1800',
+ },
+ 'playlist_mincount': 68,
+ }]
_PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'
IE_NAME = 'rutube:movie'
IE_DESC = 'Rutube movies'
_VALID_URL = r'http://rutube\.ru/metainfo/tv/(?P<id>\d+)'
+ _TESTS = []
_MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
_PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
IE_NAME = 'rutube:person'
IE_DESC = 'Rutube person videos'
_VALID_URL = r'http://rutube\.ru/video/person/(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'http://rutube.ru/video/person/313878/',
+ 'info_dict': {
+ 'id': '313878',
+ },
+ 'playlist_mincount': 37,
+ }]
_PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
return mobj.group('url')
mobj = re.search(
- r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>http://player\.(?:rutv\.ru|vgtrk\.com)/flash2v/container\.swf\?id=.+?\2)',
+ r'<meta[^>]+?property=(["\'])og:video\1[^>]+?content=(["\'])(?P<url>https?://player\.(?:rutv\.ru|vgtrk\.com)/flash2v/container\.swf\?id=.+?\2)',
webpage)
if mobj:
return mobj.group('url')
--- /dev/null
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import json
+import re
+from .common import InfoExtractor
+from ..utils import (
+ js_to_json,
+ remove_end,
+)
+
+
+class SBSIE(InfoExtractor):
+ IE_DESC = 'sbs.com.au'
+ _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/ondemand/video/(?:single/)?(?P<id>[0-9]+)'
+
+ _TESTS = [{
+ # Original URL is handled by the generic IE which finds the iframe:
+ # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation
+ 'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed',
+ 'md5': '3150cf278965eeabb5b4cea1c963fe0a',
+ 'info_dict': {
+ 'id': '320403011771',
+ 'ext': 'mp4',
+ 'title': 'Dingo Conservation',
+ 'description': 'Dingoes are on the brink of extinction; most of the animals we think are dingoes are in fact crossbred with wild dogs. This family run a dingo conservation park to prevent their extinction',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+        'add_ie': ['Generic'],
+ },
+ {
+ 'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ release_urls_json = js_to_json(self._search_regex(
+ r'(?s)playerParams\.releaseUrls\s*=\s*(\{.*?\n\});\n',
+ webpage, ''))
+ release_urls = json.loads(release_urls_json)
+ theplatform_url = (
+ release_urls.get('progressive') or release_urls.get('standard'))
+
+ title = remove_end(self._og_search_title(webpage), ' (The Feed)')
+ description = self._html_search_meta('description', webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ return {
+ '_type': 'url_transparent',
+ 'id': video_id,
+ 'url': theplatform_url,
+
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ unified_strdate,
+ parse_duration,
+ int_or_none,
+)
+
+
+class SexyKarmaIE(InfoExtractor):
+ IE_DESC = 'Sexy Karma and Watch Indian Porn'
+ _VALID_URL = r'https?://(?:www\.)?(?:sexykarma\.com|watchindianporn\.net)/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
+ _TESTS = [{
+ 'url': 'http://www.sexykarma.com/gonewild/video/taking-a-quick-pee-yHI70cOyIHt.html',
+ 'md5': 'b9798e7d1ef1765116a8f516c8091dbd',
+ 'info_dict': {
+ 'id': 'yHI70cOyIHt',
+ 'display_id': 'taking-a-quick-pee',
+ 'ext': 'mp4',
+ 'title': 'Taking a quick pee.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'wildginger7',
+ 'upload_date': '20141007',
+ 'duration': 22,
+ 'view_count': int,
+ 'comment_count': int,
+ 'categories': list,
+ }
+ }, {
+ 'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
+ 'md5': 'dd216c68d29b49b12842b9babe762a5d',
+ 'info_dict': {
+ 'id': '8Id6EZPbuHf',
+ 'display_id': 'pot-pixie-tribute',
+ 'ext': 'mp4',
+ 'title': 'pot_pixie tribute',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'banffite',
+ 'upload_date': '20141013',
+ 'duration': 16,
+ 'view_count': int,
+ 'comment_count': int,
+ 'categories': list,
+ }
+ }, {
+ 'url': 'http://www.watchindianporn.net/video/desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number-dW2mtctxJfs.html',
+ 'md5': '9afb80675550406ed9a63ac2819ef69d',
+ 'info_dict': {
+ 'id': 'dW2mtctxJfs',
+ 'display_id': 'desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number',
+ 'ext': 'mp4',
+ 'title': 'Desi dancer namrata stripping completely nude and dancing on a hot number',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Don',
+ 'upload_date': '20140213',
+ 'duration': 83,
+ 'view_count': int,
+ 'comment_count': int,
+ 'categories': list,
+ }
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_url = self._html_search_regex(
+ r"url: escape\('([^']+)'\)", webpage, 'url')
+
+ title = self._html_search_regex(
+ r'<h2 class="he2"><span>(.*?)</span>',
+ webpage, 'title')
+ thumbnail = self._html_search_regex(
+ r'<span id="container"><img\s+src="([^"]+)"',
+ webpage, 'thumbnail', fatal=False)
+
+ uploader = self._html_search_regex(
+ r'class="aupa">\s*(.*?)</a>',
+ webpage, 'uploader')
+ upload_date = unified_strdate(self._html_search_regex(
+ r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))
+
+ duration = parse_duration(self._search_regex(
+ r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
+ webpage, 'duration', fatal=False))
+
+ view_count = int_or_none(self._search_regex(
+ r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+ webpage, 'view count', fatal=False))
+ comment_count = int_or_none(self._search_regex(
+ r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+ webpage, 'comment count', fatal=False))
+
+ categories = re.findall(
+ r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
+ webpage)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': video_url,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'categories': categories,
+ }
_TEST = {
'url': 'http://shared.sx/0060718775',
- 'md5': '53e1c58fc3e777ae1dfe9e57ba2f9c72',
+ 'md5': '106fefed92a8a2adb8c98e6a0652f49b',
'info_dict': {
'id': '0060718775',
'ext': 'mp4',
- 'title': 'Big Buck Bunny Trailer',
+ 'title': 'Bmp4',
},
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urllib_request,
+ parse_duration,
+)
+
+
+class ShareSixIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
+ _TESTS = [
+ {
+ 'url': 'http://sharesix.com/f/OXjQ7Y6',
+ 'md5': '9e8e95d8823942815a7d7c773110cc93',
+ 'info_dict': {
+ 'id': 'OXjQ7Y6',
+ 'ext': 'mp4',
+ 'title': 'big_buck_bunny_480p_surround-fix.avi',
+ 'duration': 596,
+ 'width': 854,
+ 'height': 480,
+ },
+ },
+ {
+ 'url': 'http://sharesix.com/lfrwoxp35zdd',
+ 'md5': 'dd19f1435b7cec2d7912c64beeee8185',
+ 'info_dict': {
+ 'id': 'lfrwoxp35zdd',
+ 'ext': 'flv',
+ 'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
+ 'duration': 65,
+ 'width': 1280,
+ 'height': 720,
+ },
+ }
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ fields = {
+ 'method_free': 'Free'
+ }
+ post = compat_urllib_parse.urlencode(fields)
+ req = compat_urllib_request.Request(url, post)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+ webpage = self._download_webpage(req, video_id,
+ 'Downloading video page')
+
+ video_url = self._search_regex(
+ r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
+ title = self._html_search_regex(
+ r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
+ duration = parse_duration(
+ self._search_regex(
+ r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
+ webpage,
+ 'duration',
+ fatal=False
+ )
+ )
+
+ m = re.search(
+ r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
+ <dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
+ webpage
+ )
+ width = height = None
+ if m:
+ width, height = int(m.group('width')), int(m.group('height'))
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': video_url,
+ 'width': width,
+ 'height': height,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'duration': duration,
+ 'formats': formats,
+ }
IE_DESC = 'Smotri.com community videos'
IE_NAME = 'smotri:community'
_VALID_URL = r'^https?://(?:www\.)?smotri\.com/community/video/(?P<communityid>[0-9A-Za-z_\'-]+)'
+ _TEST = {
+ 'url': 'http://smotri.com/community/video/kommuna',
+ 'info_dict': {
+ 'id': 'kommuna',
+ 'title': 'КПРФ',
+ },
+ 'playlist_mincount': 4,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
IE_DESC = 'Smotri.com user videos'
IE_NAME = 'smotri:user'
_VALID_URL = r'^https?://(?:www\.)?smotri\.com/user/(?P<userid>[0-9A-Za-z_\'-]+)'
+ _TESTS = [{
+ 'url': 'http://smotri.com/user/inspector',
+ 'info_dict': {
+ 'id': 'inspector',
+ 'title': 'Inspector',
+ },
+ 'playlist_mincount': 9,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
r'<a href="([^"]*)".+class="download_file_link"',
webpage, 'file url')
video_url = "http://www.sockshare.com" + video_url
- title = self._html_search_regex(r'<h1>(.+)<strong>', webpage, 'title')
+ title = self._html_search_regex((
+ r'<h1>(.+)<strong>',
+ r'var name = "([^"]+)";'),
+ webpage, 'title', default=None)
thumbnail = self._html_search_regex(
r'<img\s+src="([^"]*)".+?name="bg"',
webpage, 'thumbnail')
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/
- (?!sets/)(?P<title>[\w\d-]+)/?
+ (?!sets/|likes/?(?:$|[?#]))
+ (?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
- |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+))
+ |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
+ (?:/?\?secret_token=(?P<secret_token>[^&]+?))?$)
|(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
)
'''
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
- 'file': '62986583.mp3',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
- "upload_date": "20121011",
- "description": "No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
- "uploader": "E.T. ExTerrestrial Music",
- "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1",
- "duration": 143,
+ 'id': '62986583',
+ 'ext': 'mp3',
+ 'upload_date': '20121011',
+ 'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
+ 'uploader': 'E.T. ExTerrestrial Music',
+ 'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
+ 'duration': 143,
}
},
# not streamable song
'duration': 9,
},
},
+ # private link (alt format)
+ {
+ 'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
+ 'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
+ 'info_dict': {
+ 'id': '123998367',
+ 'ext': 'mp3',
+ 'title': 'Youtube - Dl Test Video \'\' Ä↭',
+ 'uploader': 'jaimeMF',
+ 'description': 'test chars: \"\'/\\ä↭',
+ 'upload_date': '20131209',
+ 'duration': 9,
+ },
+ },
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
- 'description': 'md5:0170be75dd395c96025d210d261c784e',
+ 'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'upload_date': '20140109',
'duration': 17,
'description': info['description'],
'thumbnail': thumbnail,
'duration': int_or_none(info.get('duration'), 1000),
+ 'webpage_url': info.get('permalink_url'),
}
formats = []
if info.get('downloadable', False):
if track_id is not None:
info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
full_title = track_id
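+ # a secret_token in the URL marks a private track; it has to be forwarded to the API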
+ token = mobj.group('secret_token')
+ if token:
+ info_json_url += "&secret_token=" + token
elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
return self.url_result(query['url'][0])
class SoundcloudSetIE(SoundcloudIE):
- _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+ _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
IE_NAME = 'soundcloud:set'
- # it's in tests/test_playlists.py
- _TESTS = []
+ _TESTS = [{
+ 'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
+ 'info_dict': {
+ 'title': 'The Royal Concept EP',
+ },
+ 'playlist_mincount': 6,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError('Invalid URL: %s' % url)
# extract uploader (which is in the url)
- uploader = mobj.group(1)
+ uploader = mobj.group('uploader')
# extract simple title (uploader + slug of song title)
- slug_title = mobj.group(2)
+ slug_title = mobj.group('slug_title')
full_title = '%s/sets/%s' % (uploader, slug_title)
+ url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+
+ token = mobj.group('token')
+ if token:
+ full_title += '/' + token
+ url += '/' + token
self.report_resolve(full_title)
- url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
resolv_url = self._resolv_url(url)
info = self._download_json(resolv_url, full_title)
self._downloader.report_error('unable to download video webpage: %s' % compat_str(err['error_message']))
return
- self.report_extraction(full_title)
- return {'_type': 'playlist',
- 'entries': [self._extract_info_dict(track) for track in info['tracks']],
- 'id': info['id'],
- 'title': info['title'],
- }
+ return {
+ '_type': 'playlist',
+ 'entries': [self._extract_info_dict(track, secret_token=token) for track in info['tracks']],
+ 'id': info['id'],
+ 'title': info['title'],
+ }
class SoundcloudUserIE(SoundcloudIE):
_VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
IE_NAME = 'soundcloud:user'
-
- # it's in tests/test_playlists.py
- _TESTS = []
+ _TESTS = [{
+ 'url': 'https://soundcloud.com/the-concept-band',
+ 'info_dict': {
+ 'id': '9615865',
+ 'title': 'The Royal Concept',
+ },
+ 'playlist_mincount': 12
+ }, {
+ 'url': 'https://soundcloud.com/the-concept-band/likes',
+ 'info_dict': {
+ 'id': '9615865',
+ 'title': 'The Royal Concept',
+ },
+ 'playlist_mincount': 1,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
class SoundcloudPlaylistIE(SoundcloudIE):
- _VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)'
+ _VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
IE_NAME = 'soundcloud:playlist'
-
- # it's in tests/test_playlists.py
- _TESTS = []
+ _TESTS = [{
+ 'url': 'http://api.soundcloud.com/playlists/4110309',
+ 'info_dict': {
+ 'id': '4110309',
+ 'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
+ 'description': 're:.*?TILT Brass - Bowery Poetry Club',
+ },
+ 'playlist_count': 6,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
base_url = '%s//api.soundcloud.com/playlists/%s.json?' % (self.http_scheme(), playlist_id)
- data = compat_urllib_parse.urlencode({
+ data_dict = {
'client_id': self._CLIENT_ID,
- })
+ }
+ token = mobj.group('token')
+
+ if token:
+ data_dict['secret_token'] = token
+
+ data = compat_urllib_parse.urlencode(data_dict)
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
entries = [
- self._extract_info_dict(t, quiet=True) for t in data['tracks']]
+ self._extract_info_dict(t, quiet=True, secret_token=token)
+ for t in data['tracks']]
return {
'_type': 'playlist',
compat_urllib_parse,
unified_strdate,
str_to_int,
- int_or_none,
)
from ..aes import aes_decrypt_text
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
- title = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
+ title = self._html_search_regex(
+ r'<h1>([^<]+)', webpage, 'title')
description = self._html_search_regex(
- r'<div\s+id="descriptionContent">([^<]+)<', webpage, 'description', fatal=False)
+ r'<div\s+id="descriptionContent">([^<]+)<',
+ webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
- r'flashvars\.image_url = "([^"]+)', webpage, 'thumbnail', fatal=False)
+ r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
+ webpage, 'thumbnail', fatal=False)
uploader = self._html_search_regex(
- r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False)
+ r'by:\s*<a [^>]*>(.+?)</a>',
+ webpage, 'uploader', fatal=False)
uploader_id = self._html_search_regex(
- r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False)
- upload_date = self._html_search_regex(r'</a> on (.+?) at \d+:\d+', webpage, 'upload date', fatal=False)
- if upload_date:
- upload_date = unified_strdate(upload_date)
-
- view_count = self._html_search_regex(
- r'<div id="viewsCounter"><span>([^<]+)</span> views</div>', webpage, 'view count', fatal=False)
- if view_count:
- view_count = str_to_int(view_count)
- comment_count = int_or_none(self._html_search_regex(
- r'<span id="spCommentCount">\s*(\d+)</span> Comments</div>', webpage, 'comment count', fatal=False))
+ r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
+ webpage, 'uploader id', fatal=False)
+ upload_date = unified_strdate(self._html_search_regex(
+ r'</a> on (.+?) at \d+:\d+',
+ webpage, 'upload date', fatal=False))
- video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
+ view_count = str_to_int(self._html_search_regex(
+ r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
+ webpage, 'view count', fatal=False))
+ comment_count = str_to_int(self._html_search_regex(
+ r'Comments<span[^>]+>\s*\(([\d,\.]+)\)</span>',
+ webpage, 'comment count', fatal=False))
+
+ video_urls = list(map(
+ compat_urllib_parse.unquote,
+ re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*["\']([^"\']+)["\']', webpage)))
if webpage.find('flashvars\.encrypted = "true"') != -1:
- password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ')
- video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
+ password = self._html_search_regex(
+ r'flashvars\.video_title = "([^"]+)',
+ webpage, 'password').replace('+', ' ')
+ video_urls = list(map(
+ lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
+ video_urls))
formats = []
for video_url in video_urls:
import re
from .common import InfoExtractor
+from ..utils import compat_urlparse
class SpiegelIE(InfoExtractor):
'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
'duration': 983,
},
- }, {
- 'url': 'http://www.spiegel.de/video/johann-westhauser-videobotschaft-des-hoehlenforschers-video-1502367.html',
- 'md5': '54f58ba0e752e3c07bc2a26222dd0acf',
- 'info_dict': {
- 'id': '1502367',
- 'ext': 'mp4',
- 'title': 'Videobotschaft: Höhlenforscher Westhauser dankt seinen Rettern',
- 'description': 'md5:c6f1ec11413ebd1088b6813943e5fc91',
- 'duration': 42,
- },
}]
def _real_extract(self, url):
'duration': duration,
'formats': formats,
}
+
+
+class SpiegelArticleIE(InfoExtractor):
+ _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
+ IE_NAME = 'Spiegel:Article'
+ IE_DESC = 'Articles on spiegel.de'
+ _TEST = {
+ 'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
+ 'info_dict': {
+ 'id': '1516455',
+ 'ext': 'mp4',
+ 'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
+ 'description': 're:^Patrick Kämnitz gehört.{100,}',
+ },
+ }
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ video_link = self._search_regex(
+ r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
+ 'video page URL')
+ video_url = compat_urlparse.urljoin(
+ self.http_scheme() + '//spiegel.de/', video_link)
+
+ return {
+ '_type': 'url',
+ 'url': video_url,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class Sport5IE(InfoExtractor):
+ _VALID_URL = r'http://(?:(?:www|vod)\.)?sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
+ _TESTS = [
+ {
+ 'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
+ 'info_dict': {
+ 'id': 's5-Y59xx1-GUh2',
+ 'ext': 'mp4',
+ 'title': 'ולנסיה-קורדובה 0:3',
+ 'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
+ 'duration': 228,
+ 'categories': list,
+ },
+ 'skip': 'Blocked outside of Israel',
+ }, {
+ 'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
+ 'info_dict': {
+ 'id': 's5-SiXxx1-hKh2',
+ 'ext': 'mp4',
+ 'title': 'GOALS_CELTIC_270914.mp4',
+ 'description': '',
+ 'duration': 87,
+ 'categories': list,
+ },
+ 'skip': 'Blocked outside of Israel',
+ }
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ media_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, media_id)
+
+ video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id')
+
+ metadata = self._download_xml(
+ 'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
+ video_id)
+
+ error = metadata.find('./Error')
+ if error is not None:
+ raise ExtractorError(
+ '%s returned error: %s - %s' % (
+ self.IE_NAME,
+ error.find('./Name').text,
+ error.find('./Description').text),
+ expected=True)
+
+ title = metadata.find('./Title').text
+ description = metadata.find('./Description').text
+ duration = int(metadata.find('./Duration').text)
+
+ posters_el = metadata.find('./PosterLinks')
+ thumbnails = [{
+ 'url': thumbnail.text,
+ 'width': int(thumbnail.get('width')),
+ 'height': int(thumbnail.get('height')),
+ } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []
+
+ categories_el = metadata.find('./Categories')
+ categories = [
+ cat.get('name') for cat in categories_el.findall('./Category')
+ ] if categories_el is not None else []
+
+ formats = [{
+ 'url': fmt.text,
+ 'ext': 'mp4',
+ 'vbr': int(fmt.get('bitrate')),
+ 'width': int(fmt.get('width')),
+ 'height': int(fmt.get('height')),
+ } for fmt in metadata.findall('./PlaybackLinks/FileURL')]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnails': thumbnails,
+ 'duration': duration,
+ 'categories': categories,
+ 'formats': formats,
+ }
\ No newline at end of file
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ parse_iso8601,
+)
+
+
+class SportBoxIE(InfoExtractor):
+ _VALID_URL = r'https?://news\.sportbox\.ru/Vidy_sporta/(?:[^/]+/)+spbvideo_NI\d+_(?P<display_id>.+)'
+ _TESTS = [
+ {
+ 'url': 'http://news.sportbox.ru/Vidy_sporta/Avtosport/Rossijskij/spbvideo_NI483529_Gonka-2-zaezd-Obyedinenniy-2000-klassi-Turing-i-S',
+ 'md5': 'ff56a598c2cf411a9a38a69709e97079',
+ 'info_dict': {
+ 'id': '80822',
+ 'ext': 'mp4',
+ 'title': 'Гонка 2 заезд ««Объединенный 2000»: классы Туринг и Супер-продакшн',
+ 'description': 'md5:81715fa9c4ea3d9e7915dc8180c778ed',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1411896237,
+ 'upload_date': '20140928',
+ 'duration': 4846,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://news.sportbox.ru/Vidy_sporta/billiard/spbvideo_NI486287_CHempionat-mira-po-dinamichnoy-piramide-4',
+ 'only_matching': True,
+ }
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_id = self._search_regex(
+ r'src="/vdl/player/media/(\d+)"', webpage, 'video id')
+
+ player = self._download_webpage(
+ 'http://news.sportbox.ru/vdl/player/media/%s' % video_id,
+ display_id, 'Downloading player webpage')
+
+ hls = self._search_regex(
+ r"var\s+original_hls_file\s*=\s*'([^']+)'", player, 'hls file')
+
+ formats = self._extract_m3u8_formats(hls, display_id, 'mp4')
+
+ title = self._html_search_regex(
+ r'<h1 itemprop="name">([^<]+)</h1>', webpage, 'title')
+ description = self._html_search_regex(
+ r'(?s)<div itemprop="description">(.+?)</div>', webpage, 'description', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
+ timestamp = parse_iso8601(self._search_regex(
+ r'<span itemprop="uploadDate">([^<]+)</span>', webpage, 'timestamp', fatal=False))
+ duration = parse_duration(self._html_search_regex(
+ r'<meta itemprop="duration" content="PT([^"]+)">', webpage, 'duration', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_request,
+ parse_iso8601,
+)
+
+
+class SportDeutschlandIE(InfoExtractor):
+ _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
+ _TESTS = [{
+ 'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
+ 'info_dict': {
+ 'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
+ 'ext': 'mp4',
+ 'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
+ 'categories': ['Badminton'],
+ 'view_count': int,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
+ 'timestamp': int,
+ 'upload_date': 're:^201408[23][0-9]$',
+ },
+ 'params': {
+ 'skip_download': 'Live stream',
+ },
+ }, {
+ 'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
+ 'info_dict': {
+ 'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
+ 'ext': 'mp4',
+ 'upload_date': '20140825',
+ 'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
+ 'timestamp': 1408976060,
+ 'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'view_count': int,
+ 'categories': ['Li-Ning Badminton WM 2014'],
+ }
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ sport_id = mobj.group('sport')
+
+ api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
+ sport_id, video_id)
+ req = compat_urllib_request.Request(api_url, headers={
+ 'Accept': 'application/vnd.vidibus.v2.html+json',
+ 'Referer': url,
+ })
+ data = self._download_json(req, video_id)
+
+ categories = list(data.get('section', {}).get('tags', {}).values())
+ asset = data['asset']
+
+ formats = []
+ smil_url = asset['video']
+ if '.smil' in smil_url:
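+ # an HLS rendition lives at the same path with .m3u8 in place of .smil;
+ # the RTMP entries extracted below are kept only as a low-priority fallback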
+ m3u8_url = smil_url.replace('.smil', '.m3u8')
+ formats.extend(
+ self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))
+
+ smil_doc = self._download_xml(
+ smil_url, video_id, note='Downloading SMIL metadata')
+ base_url = smil_doc.find('./head/meta').attrib['base']
+ formats.extend([{
+ 'format_id': 'rtmp',
+ 'url': base_url,
+ 'play_path': n.attrib['src'],
+ 'ext': 'flv',
+ 'preference': -100,
+ 'format_note': 'Seems to fail at example stream',
+ } for n in smil_doc.findall('./body/video')])
+ else:
+ formats.append({'url': smil_url})
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'title': asset['title'],
+ 'thumbnail': asset.get('image'),
+ 'description': asset.get('teaser'),
+ 'categories': categories,
+ 'view_count': asset.get('views'),
+ 'rtmp_live': asset.get('live'),
+ 'timestamp': parse_iso8601(asset.get('date')),
+ }
+
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class SRMediathekIE(InfoExtractor):
+ IE_DESC = 'Saarländischer Rundfunk'
+ _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
+
+ _TEST = {
+ 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
+ 'info_dict': {
+ 'id': '28455',
+ 'ext': 'mp4',
+ 'title': 'sportarena (26.10.2014)',
+ 'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
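+ # the page stores media URLs and titles in parallel JS arrays (mediaURLs / mediaTitles)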
+ murls = json.loads(js_to_json(self._search_regex(
+ r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs')))
+ formats = [{'url': murl} for murl in murls]
+ self._sort_formats(formats)
+
+ title = json.loads(js_to_json(self._search_regex(
+ r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'description': self._og_search_description(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ int_or_none,
+ qualities,
+ determine_ext,
+)
+
+
+class SunPornoIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?sunporno\.com/videos/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://www.sunporno.com/videos/807778/',
+ 'md5': '6457d3c165fd6de062b99ef6c2ff4c86',
+ 'info_dict': {
+ 'id': '807778',
+ 'ext': 'flv',
+ 'title': 'md5:0a400058e8105d39e35c35e7c5184164',
+ 'description': 'md5:a31241990e1bd3a64e72ae99afb325fb',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 302,
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'title')
+ description = self._html_search_meta('description', webpage, 'description')
+ thumbnail = self._html_search_regex(
+ r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+ duration = parse_duration(self._search_regex(
+ r'Duration:\s*(\d+:\d+)\s*<', webpage, 'duration', fatal=False))
+
+ view_count = int_or_none(self._html_search_regex(
+ r'class="views">\s*(\d+)\s*<', webpage, 'view count', fatal=False))
+ comment_count = int_or_none(self._html_search_regex(
+ r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
+
+ formats = []
+ quality = qualities(['mp4', 'flv'])
+ for video_url in re.findall(r'<source src="([^"]+)"', webpage):
+ video_ext = determine_ext(video_url)
+ formats.append({
+ 'url': video_url,
+ 'format_id': video_ext,
+ 'quality': quality(video_ext),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'formats': formats,
+ 'age_limit': 18,
+ }
'uploader': 'SWR 2',
'uploader_id': '284670',
}
- }, {
- 'url': 'http://swrmediathek.de/content/player.htm?show=52dc7e00-15c5-11e4-84bc-0026b975f2e6',
- 'md5': '881531487d0633080a8cc88d31ef896f',
- 'info_dict': {
- 'id': '52dc7e00-15c5-11e4-84bc-0026b975f2e6',
- 'ext': 'mp4',
- 'title': 'Familienspaß am Bodensee',
- 'description': 'md5:0b591225a32cfde7be1629ed49fe4315',
- 'thumbnail': 're:http://.*\.jpg',
- 'duration': 1784,
- 'upload_date': '20140727',
- 'uploader': 'SWR Fernsehen BW',
- 'uploader_id': '281130',
- }
}]
def _real_extract(self, url):
_TESTS = [{
'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458',
- 'md5': 'e07de1d52c7278adbb9b9b1c93a66849',
'info_dict': {
'id': 'NmqMrGnXvmO1',
'ext': 'flv',
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ clean_html,
+ compat_urllib_request,
+ float_or_none,
+ parse_iso8601,
+)
+
+
+class TapelyIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
+ _API_URL = 'http://tape.ly/showtape?id={0:}'
+ _S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
+ _SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
+ _TESTS = [
+ {
+ 'url': 'http://tape.ly/my-grief-as-told-by-water',
+ 'info_dict': {
+ 'id': 23952,
+ 'title': 'my grief as told by water',
+ 'thumbnail': 're:^https?://.*\.png$',
+ 'uploader_id': 16484,
+ 'timestamp': 1411848286,
+ 'description': 'For Robin and Ponkers, whom the tides of life have taken out to sea.',
+ },
+ 'playlist_count': 13,
+ },
+ {
+ 'url': 'http://tape.ly/my-grief-as-told-by-water/1',
+ 'md5': '79031f459fdec6530663b854cbc5715c',
+ 'info_dict': {
+ 'id': 258464,
+ 'title': 'Dreaming Awake (My Brightest Diamond)',
+ 'ext': 'm4a',
+ },
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('id')
+
+ playlist_url = self._API_URL.format(display_id)
+ request = compat_urllib_request.Request(playlist_url)
+ request.add_header('X-Requested-With', 'XMLHttpRequest')
+ request.add_header('Accept', 'application/json')
+
+ playlist = self._download_json(request, display_id)
+
+ tape = playlist['tape']
+
+ entries = []
+ for s in tape['songs']:
+ song = s['song']
+ entry = {
+ 'id': song['id'],
+ 'duration': float_or_none(song.get('songduration'), 1000),
+ 'title': song['title'],
+ }
+ if song['source'] == 'S3':
+ entry.update({
+ 'url': self._S3_SONG_URL.format(song['filename']),
+ })
+ entries.append(entry)
+ elif song['source'] == 'YT':
+ self.to_screen('YouTube video detected')
+ yt_id = song['filename'].replace('/youtube/', '')
+ entry.update(self.url_result(yt_id, 'Youtube', video_id=yt_id))
+ entries.append(entry)
+ elif song['source'] == 'SC':
+ self.to_screen('SoundCloud song detected')
+ sc_url = self._SOUNDCLOUD_SONG_URL.format(song['filename'])
+ entry.update(self.url_result(sc_url, 'Soundcloud'))
+ entries.append(entry)
+ else:
+ self.report_warning('Unknown song source: %s' % song['source'])
+
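+ # an optional trailing /<n> in the URL selects a single song; the index is 1-based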
+ if mobj.group('songnr'):
+ songnr = int(mobj.group('songnr')) - 1
+ try:
+ return entries[songnr]
+ except IndexError:
+ raise ExtractorError(
+ 'No song with index: %s' % mobj.group('songnr'),
+ expected=True)
+
+ return {
+ '_type': 'playlist',
+ 'id': tape['id'],
+ 'display_id': display_id,
+ 'title': tape['name'],
+ 'entries': entries,
+ 'thumbnail': tape.get('image_url'),
+ 'description': clean_html(tape.get('subtext')),
+ 'like_count': tape.get('likescount'),
+ 'uploader_id': tape.get('user_id'),
+ 'timestamp': parse_iso8601(tape.get('published_at')),
+ }
\s*
<a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
'''
+ _TEST = {
+ 'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
+ 'info_dict': {
+ 'id': 'rbhagwati2'
+ },
+ 'playlist_mincount': 179,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group("video_id")
if not video_id:
video_id = self._html_search_regex(
- r'<article class="video" data-id="(\d+?)"',
+ r'data-node-id="(\d+?)"',
webpage, 'video id')
data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
_VALID_URL = r'https?://techtalks\.tv/talks/[^/]*/(?P<id>\d+)/'
_TEST = {
- u'url': u'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
- u'playlist': [
+ 'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
+ 'info_dict': {
+ 'id': '57758',
+ 'title': 'Learning Topic Models --- Going beyond SVD',
+ },
+ 'playlist': [
{
- u'file': u'57758.flv',
- u'info_dict': {
- u'title': u'Learning Topic Models --- Going beyond SVD',
+ 'info_dict': {
+ 'id': '57758',
+ 'ext': 'flv',
+ 'title': 'Learning Topic Models --- Going beyond SVD',
},
},
{
- u'file': u'57758-slides.flv',
- u'info_dict': {
- u'title': u'Learning Topic Models --- Going beyond SVD',
+ 'info_dict': {
+ 'id': '57758-slides',
+ 'ext': 'flv',
+ 'title': 'Learning Topic Models --- Going beyond SVD',
},
},
],
- u'params': {
+ 'params': {
# rtmp download
- u'skip_download': True,
+ 'skip_download': True,
},
}
mobj = re.match(self._VALID_URL, url)
talk_id = mobj.group('id')
webpage = self._download_webpage(url, talk_id)
- rtmp_url = self._search_regex(r'netConnectionUrl: \'(.*?)\'', webpage,
- u'rtmp url')
- play_path = self._search_regex(r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
- webpage, u'presenter play path')
+ rtmp_url = self._search_regex(
+ r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url')
+ play_path = self._search_regex(
+ r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
+ webpage, 'presenter play path')
title = clean_html(get_element_by_attribute('class', 'title', webpage))
video_info = {
- 'id': talk_id,
- 'title': title,
- 'url': rtmp_url,
- 'play_path': play_path,
- 'ext': 'flv',
- }
+ 'id': talk_id,
+ 'title': title,
+ 'url': rtmp_url,
+ 'play_path': play_path,
+ 'ext': 'flv',
+ }
m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
if m_slides is None:
return video_info
else:
- return [
- video_info,
- # The slides video
- {
- 'id': talk_id + '-slides',
- 'title': title,
- 'url': rtmp_url,
- 'play_path': m_slides.group(1),
- 'ext': 'flv',
- },
- ]
+ return {
+ '_type': 'playlist',
+ 'id': talk_id,
+ 'title': title,
+ 'entries': [
+ video_info,
+ # The slides video
+ {
+ 'id': talk_id + '-slides',
+ 'title': title,
+ 'url': rtmp_url,
+ 'play_path': m_slides.group(1),
+ 'ext': 'flv',
+ },
+ ],
+ }
}
}, {
'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
- 'md5': '49144e345a899b8cb34d315f3b9cfeeb',
'info_dict': {
'id': '1972',
'ext': 'mp4',
'uploader': 'Gabby Giffords and Mark Kelly',
'description': 'md5:5174aed4d0f16021b704120360f72b92',
},
+ }, {
+ 'url': 'http://www.ted.com/playlists/who_are_the_hackers',
+ 'info_dict': {
+ 'id': '10',
+ 'title': 'Who are the hackers?',
+ },
+ 'playlist_mincount': 6,
+ }, {
+ # contains a youtube video
+ 'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
+ 'add_ie': ['Youtube'],
+ 'info_dict': {
+ 'id': '_ZG8HBuDjgc',
+ 'ext': 'mp4',
+ 'title': 'Douglas Adams: Parrots the Universe and Everything',
+ 'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
+ 'uploader': 'University of California Television (UCTV)',
+ 'uploader_id': 'UCtelevision',
+ 'upload_date': '20080522',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
_NATIVE_FORMATS = {
talk_info = self._extract_info(webpage)['talks'][0]
+ if talk_info.get('external') is not None:
+ self.to_screen('Found video from %s' % talk_info['external']['service'])
+ return {
+ '_type': 'url',
+ 'url': talk_info['external']['uri'],
+ }
+
formats = [{
'url': format_url,
'format_id': format_id,
thumbnail = 'http://' + thumbnail
return {
'id': video_id,
- 'title': talk_info['title'],
+ 'title': talk_info['title'].strip(),
'uploader': talk_info['speaker'],
'thumbnail': thumbnail,
'description': self._og_search_description(webpage),
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mitele import MiTeleIE
+
+
+class TelecincoIE(MiTeleIE):
+ IE_NAME = 'telecinco.es'
+ _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/[^/]+/(?P<episode>.*?)\.html'
+
+ _TEST = {
+ 'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
+ 'info_dict': {
+ 'id': 'MDSVID20141015_0058',
+ 'ext': 'mp4',
+ 'title': 'Con Martín Berasategui, hacer un bacalao al ...',
+ 'duration': 662,
+ },
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import remove_start
+
+
+class TeleMBIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
+ _TESTS = [
+ {
+ 'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
+ 'md5': 'f45ea69878516ba039835794e0f8f783',
+ 'info_dict': {
+ 'id': '13466',
+ 'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
+ 'ext': 'mp4',
+ 'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
+ 'description': 'md5:bc5225f47b17c309761c856ad4776265',
+ 'thumbnail': 're:^http://.*\.(?:jpg|png)$',
+ }
+ },
+ {
+ # non-ASCII characters in download URL
+ 'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
+ 'md5': '6e9682736e5ccd4eab7f21e855350733',
+ 'info_dict': {
+ 'id': '13514',
+ 'display_id': 'les-reportages-havre-incendie-mortel',
+ 'ext': 'mp4',
+ 'title': 'Havré - Incendie mortel - Les reportages',
+ 'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
+ 'thumbnail': 're:^http://.*\.(?:jpg|png)$',
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ formats = []
+ for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
+ fmt = {
+ 'url': video_url,
+ 'format_id': video_url.split(':')[0]
+ }
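+ # rtmp:// URLs have to be split into base URL, app and play path for rtmpdump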
+ rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
+ if rtmp:
+ fmt.update({
+ 'play_path': rtmp.group('playpath'),
+ 'app': rtmp.group('app'),
+ 'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
+ 'page_url': 'http://www.telemb.be',
+ 'preference': -1,
+ })
+ formats.append(fmt)
+ self._sort_formats(formats)
+
+ title = remove_start(self._og_search_title(webpage), 'TéléMB : ')
+ description = self._html_search_regex(
+ r'<meta property="og:description" content="(.+?)" />',
+ webpage, 'description', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class TheOnionIE(InfoExtractor):
+ _VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P<article_id>[0-9]+)/?'
+ _TEST = {
+ 'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/',
+ 'md5': '19eaa9a39cf9b9804d982e654dc791ee',
+ 'info_dict': {
+ 'id': '2133',
+ 'ext': 'mp4',
+ 'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image',
+ 'description': 'md5:cc12448686b5600baae9261d3e180910',
+ 'thumbnail': 're:^https?://.*\.jpg\?\d+$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ article_id = mobj.group('article_id')
+
+ webpage = self._download_webpage(url, article_id)
+
+ video_id = self._search_regex(
+ r'"videoId":\s(\d+),', webpage, 'video ID')
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ sources = re.findall(r'<source src="([^"]+)" type="([^"]+)"', webpage)
+ if not sources:
+ raise ExtractorError(
+ 'No sources found for video %s' % video_id, expected=True)
+
+ formats = []
+ for src, type_ in sources:
+ if type_ == 'video/mp4':
+ formats.append({
+ 'format_id': 'mp4_sd',
+ 'preference': 1,
+ 'url': src,
+ })
+ elif type_ == 'video/webm':
+ formats.append({
+ 'format_id': 'webm_sd',
+ 'preference': 0,
+ 'url': src,
+ })
+ elif type_ == 'application/x-mpegURL':
+ formats.extend(
+ self._extract_m3u8_formats(src, video_id, preference=-1))
+ else:
+ self.report_warning(
+ 'Encountered unexpected format: %s' % type_)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ }
from .common import InfoExtractor
from ..utils import (
+ compat_str,
+ determine_ext,
ExtractorError,
xpath_with_ns,
)
'skip_download': True,
},
}
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ if mobj.group('config'):
+ config_url = url + '&form=json'
+ config_url = config_url.replace('swf/', 'config/')
+ config_url = config_url.replace('onsite/', 'onsite/config/')
+ config = self._download_json(config_url, video_id, 'Downloading config')
+ smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+ else:
+ smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
+ 'format=smil&mbr=true'.format(video_id))
- def _get_info(self, video_id, smil_url):
- meta = self._download_xml(smil_url, video_id)
+ meta = self._download_xml(smil_url, video_id)
try:
error_msg = next(
n.attrib['abstract']
body = meta.find(_x('smil:body'))
f4m_node = body.find(_x('smil:seq//smil:video'))
- if f4m_node is not None:
+ if f4m_node is not None and '.f4m' in f4m_node.attrib['src']:
f4m_url = f4m_node.attrib['src']
if 'manifest.f4m?' not in f4m_url:
f4m_url += '?'
# the parameters are from syfy.com, other sites may use others,
# they also work for nbc.com
f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3'
- formats = [{
- 'ext': 'flv',
- 'url': f4m_url,
- }]
+ formats = self._extract_f4m_formats(f4m_url, video_id)
else:
- base_url = head.find(_x('smil:meta')).attrib['base']
- switch = body.find(_x('smil:switch'))
formats = []
- for f in switch.findall(_x('smil:video')):
- attr = f.attrib
- width = int(attr['width'])
- height = int(attr['height'])
- vbr = int(attr['system-bitrate']) // 1000
- format_id = '%dx%d_%dk' % (width, height, vbr)
- formats.append({
- 'format_id': format_id,
- 'url': base_url,
- 'play_path': 'mp4:' + attr['src'],
- 'ext': 'flv',
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- })
+ switch = body.find(_x('smil:switch'))
+ if switch is not None:
+ base_url = head.find(_x('smil:meta')).attrib['base']
+ for f in switch.findall(_x('smil:video')):
+ attr = f.attrib
+ width = int(attr['width'])
+ height = int(attr['height'])
+ vbr = int(attr['system-bitrate']) // 1000
+ format_id = '%dx%d_%dk' % (width, height, vbr)
+ formats.append({
+ 'format_id': format_id,
+ 'url': base_url,
+ 'play_path': 'mp4:' + attr['src'],
+ 'ext': 'flv',
+ 'width': width,
+ 'height': height,
+ 'vbr': vbr,
+ })
+ else:
+ switch = body.find(_x('smil:seq//smil:switch'))
+ for f in switch.findall(_x('smil:video')):
+ attr = f.attrib
+ vbr = int(attr['system-bitrate']) // 1000
+ ext = determine_ext(attr['src'])
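+ # sources delivered with a '.once' pseudo-extension are actually MP4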
+ if ext == 'once':
+ ext = 'mp4'
+ formats.append({
+ 'format_id': compat_str(vbr),
+ 'url': attr['src'],
+ 'vbr': vbr,
+ 'ext': ext,
+ })
self._sort_formats(formats)
return {
'thumbnail': info['defaultThumbnailUrl'],
'duration': info['duration']//1000,
}
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- if mobj.group('config'):
- config_url = url+ '&form=json'
- config_url = config_url.replace('swf/', 'config/')
- config_url = config_url.replace('onsite/', 'onsite/config/')
- config = self._download_json(config_url, video_id, 'Downloading config')
- smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
- else:
- smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
- 'format=smil&mbr=true'.format(video_id))
- return self._get_info(video_id, smil_url)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import unified_strdate
+
+
+class TheSixtyOneIE(InfoExtractor):
+ _VALID_URL = r'''(?x)https?://(?:www\.)?thesixtyone\.com/
+ (?:.*?/)*
+ (?:
+ s|
+ song/comments/list|
+ song
+ )/(?P<id>[A-Za-z0-9]+)/?$'''
+ _SONG_URL_TEMPLATE = 'http://thesixtyone.com/s/{0:}'
+ _SONG_FILE_URL_TEMPLATE = 'http://{audio_server:}.thesixtyone.com/thesixtyone_production/audio/{0:}_stream'
+ _THUMBNAIL_URL_TEMPLATE = '{photo_base_url:}_desktop'
+ _TESTS = [
+ {
+ 'url': 'http://www.thesixtyone.com/s/SrE3zD7s1jt/',
+ 'md5': '821cc43b0530d3222e3e2b70bb4622ea',
+ 'info_dict': {
+ 'id': 'SrE3zD7s1jt',
+ 'ext': 'mp3',
+ 'title': 'CASIO - Unicorn War Mixtape',
+ 'thumbnail': 're:^https?://.*_desktop$',
+ 'upload_date': '20071217',
+ 'duration': 3208,
+ }
+ },
+ {
+ 'url': 'http://www.thesixtyone.com/song/comments/list/SrE3zD7s1jt',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://www.thesixtyone.com/s/ULoiyjuJWli#/s/SrE3zD7s1jt/',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://www.thesixtyone.com/#/s/SrE3zD7s1jt/',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://www.thesixtyone.com/song/SrE3zD7s1jt/',
+ 'only_matching': True,
+ },
+ ]
+
+ _DECODE_MAP = {
+ "x": "a",
+ "m": "b",
+ "w": "c",
+ "q": "d",
+ "n": "e",
+ "p": "f",
+ "a": "0",
+ "h": "1",
+ "e": "2",
+ "u": "3",
+ "s": "4",
+ "i": "5",
+ "o": "6",
+ "y": "7",
+ "r": "8",
+ "c": "9"
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ song_id = mobj.group('id')
+
+ webpage = self._download_webpage(
+ self._SONG_URL_TEMPLATE.format(song_id), song_id)
+
+ song_data = json.loads(self._search_regex(
+ r'"%s":\s(\{.*?\})' % song_id, webpage, 'song_data'))
+ keys = [self._DECODE_MAP.get(s, s) for s in song_data['key']]
+ url = self._SONG_FILE_URL_TEMPLATE.format(
+ "".join(reversed(keys)), **song_data)
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ 'ext': 'mp3',
+ }]
+
+ return {
+ 'id': song_id,
+ 'title': '{artist:} - {name:}'.format(**song_data),
+ 'formats': formats,
+ 'comment_count': song_data.get('comments_count'),
+ 'duration': song_data.get('play_time'),
+ 'like_count': song_data.get('score'),
+ 'thumbnail': self._THUMBNAIL_URL_TEMPLATE.format(**song_data),
+ 'upload_date': unified_strdate(song_data.get('publish_date')),
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ unified_strdate
+)
+
+
+class THVideoIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?thvideo\.tv/(?:v/th|mobile\.php\?cid=)(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://thvideo.tv/v/th1987/',
+ 'md5': 'fa107b1f73817e325e9433505a70db50',
+ 'info_dict': {
+ 'id': '1987',
+ 'ext': 'mp4',
+ 'title': '【动画】秘封活动记录 ~ The Sealed Esoteric History.分镜稿预览',
+ 'display_id': 'th1987',
+ 'thumbnail': 'http://thvideo.tv/uploadfile/2014/0722/20140722013459856.jpg',
+ 'description': '社团京都幻想剧团的第一个东方二次同人动画作品「秘封活动记录 ~ The Sealed Esoteric History.」 本视频是该动画第一期的分镜草稿...',
+ 'upload_date': '20140722'
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ # extract download link from mobile player page
+ webpage_player = self._download_webpage(
+ 'http://thvideo.tv/mobile.php?cid=%s-0' % (video_id),
+ video_id, note='Downloading video source page')
+ video_url = self._html_search_regex(
+ r'<source src="(.*?)" type', webpage_player, 'video url')
+
+ # extract video info from main page
+ webpage = self._download_webpage(
+ 'http://thvideo.tv/v/th%s' % (video_id), video_id)
+ title = self._og_search_title(webpage)
+ display_id = 'th%s' % video_id
+ thumbnail = self._og_search_thumbnail(webpage)
+ description = self._og_search_description(webpage)
+ upload_date = unified_strdate(self._html_search_regex(
+ r'span itemprop="datePublished" content="(.*?)">', webpage,
+ 'upload date', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'ext': 'mp4',
+ 'url': video_url,
+ 'title': title,
+ 'display_id': display_id,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'upload_date': upload_date
+ }
+
+
+class THVideoPlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://thvideo.tv/mylist2',
+ 'info_dict': {
+ 'id': '2',
+ 'title': '幻想万華鏡',
+ },
+ 'playlist_mincount': 23,
+ }
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+ list_title = self._html_search_regex(
+ r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
+ fatal=False)
+
+ entries = [
+ self.url_result('http://thvideo.tv/v/th' + id, 'THVideo')
+ for id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
+
+ return self.playlist_result(entries, playlist_id, list_title)
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ fix_xml_ampersands,
+)
+
+
+class TNAFlixIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)'
+
+ _TITLE_REGEX = None
+ _DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>'
+ _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
+
+ _TEST = {
+ 'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
+ 'md5': 'ecf3498417d09216374fc5907f9c6ec0',
+ 'info_dict': {
+ 'id': '553878',
+ 'display_id': 'Carmella-Decesare-striptease',
+ 'ext': 'mp4',
+ 'title': 'Carmella Decesare - striptease',
+ 'description': '',
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'duration': 91,
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._html_search_regex(
+ self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage)
+ description = self._html_search_regex(
+ self._DESCRIPTION_REGEX, webpage, 'description', fatal=False, default='')
+
+ age_limit = self._rta_search(webpage)
+
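+ # the duration meta value carries a leading 'P' (ISO 8601 style); drop it so parse_duration can handle the rest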
+ duration = self._html_search_meta('duration', webpage, 'duration', default=None)
+ if duration:
+ duration = parse_duration(duration[1:])
+
+ cfg_url = self._html_search_regex(
+ self._CONFIG_REGEX, webpage, 'flashvars.config')
+
+ cfg_xml = self._download_xml(
+ cfg_url, display_id, note='Downloading metadata',
+ transform_source=fix_xml_ampersands)
+
+ thumbnail = cfg_xml.find('./startThumb').text
+
+ formats = []
+ for item in cfg_xml.findall('./quality/item'):
+ video_url = re.sub(r'speed=\d+', 'speed=', item.find('videoLink').text)
+ format_id = item.find('res').text
+ fmt = {
+ 'url': video_url,
+ 'format_id': format_id,
+ }
+ m = re.search(r'^(\d+)', format_id)
+ if m:
+ fmt['height'] = int(m.group(1))
+ formats.append(fmt)
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'age_limit': age_limit,
+ 'formats': formats,
+ }
class ToypicsUserIE(InfoExtractor):
IE_DESC = 'Toypics user profile'
_VALID_URL = r'http://videos\.toypics\.net/(?P<username>[^/?]+)(?:$|[?#])'
+ _TEST = {
+ 'url': 'http://videos.toypics.net/Mikey',
+ 'info_dict': {
+ 'id': 'Mikey',
+ },
+ 'playlist_mincount': 19,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..utils import xpath_text
class TruTubeIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?trutube\.tv/video/(?P<id>[0-9]+)/.*'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
+ _TESTS = [{
'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
'info_dict': {
'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
'thumbnail': 're:^http:.*\.jpg$',
}
- }
+ }, {
+ 'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_title = self._og_search_title(webpage).strip()
- thumbnail = self._search_regex(
- r"var splash_img = '([^']+)';", webpage, 'thumbnail', fatal=False)
+ config = self._download_xml(
+ 'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
+ video_id, transform_source=lambda s: s.strip())
- all_formats = re.finditer(
- r"var (?P<key>[a-z]+)_video_file\s*=\s*'(?P<url>[^']+)';", webpage)
- formats = [{
- 'format_id': m.group('key'),
- 'quality': -i,
- 'url': m.group('url'),
- } for i, m in enumerate(all_formats)]
- self._sort_formats(formats)
+ # filehd is always 404
+ video_url = xpath_text(config, './file', 'video URL', fatal=True)
+ title = xpath_text(config, './title', 'title')
+ thumbnail = xpath_text(config, './image', 'thumbnail')
return {
'id': video_id,
- 'title': video_title,
- 'formats': formats,
+ 'url': video_url,
+ 'title': title,
'thumbnail': thumbnail,
}
class Tube8IE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/){2}(?P<id>\d+)'
- _TEST = {
- 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
- 'md5': '44bf12b98313827dd52d35b8706a4ea0',
- 'info_dict': {
- 'id': '229795',
- 'ext': 'mp4',
- 'description': 'hot teen Kasia grinding',
- 'uploader': 'unknown',
- 'title': 'Kasia music video',
- 'age_limit': 18,
- }
- }
+ _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
+ _TESTS = [
+ {
+ 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
+ 'md5': '44bf12b98313827dd52d35b8706a4ea0',
+ 'info_dict': {
+ 'id': '229795',
+ 'display_id': 'kasia-music-video',
+ 'ext': 'mp4',
+ 'description': 'hot teen Kasia grinding',
+ 'uploader': 'unknown',
+ 'title': 'Kasia music video',
+ 'age_limit': 18,
+ }
+ },
+ {
+ 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
+ 'only_matching': True,
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(req, video_id)
+ webpage = self._download_webpage(req, display_id)
flashvars = json.loads(self._html_search_regex(
r'var flashvars\s*=\s*({.+?})', webpage, 'flashvars'))
return {
'id': video_id,
+ 'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
# coding: utf-8
+from __future__ import unicode_literals
+
import re
import json
class TudouIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
_TESTS = [{
- u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
- u'file': u'159448201.f4v',
- u'md5': u'140a49ed444bd22f93330985d8475fcb',
- u'info_dict': {
- u"title": u"卡马乔国足开大脚长传冲吊集锦"
+ 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
+ 'md5': '140a49ed444bd22f93330985d8475fcb',
+ 'info_dict': {
+ 'id': '159448201',
+ 'ext': 'f4v',
+ 'title': '卡马乔国足开大脚长传冲吊集锦',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
+ 'info_dict': {
+ 'id': '117049447',
+ 'ext': 'f4v',
+ 'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
+ 'thumbnail': 're:^https?://.*\.jpg$',
}
- },
- {
- u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
- u'file': u'todo.mp4',
- u'md5': u'todo.mp4',
- u'info_dict': {
- u'title': u'todo.mp4',
+ }, {
+ 'url': 'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
+ 'info_dict': {
+ 'title': 'todo.mp4',
},
- u'add_ie': [u'Youku'],
- u'skip': u'Only works from China'
+ 'add_ie': ['Youku'],
+ 'skip': 'Only works from China'
}]
def _url_for_id(self, id, quality = None):
if m and m.group(1):
return {
'_type': 'url',
- 'url': u'youku:' + m.group(1),
+ 'url': 'youku:' + m.group(1),
'ie_key': 'Youku'
}
title = self._search_regex(
- r",kw:\s*['\"](.+?)[\"']", webpage, u'title')
+ r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
thumbnail_url = self._search_regex(
- r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)
+ r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
segments = json.loads(segs_json)
# It looks like the keys are the arguments that have to be passed as
# the hd field in the request url, we pick the highest
- quality = sorted(segments.keys())[-1]
+ # Also, filter non-number qualities (see issue #3643).
+ quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
+ key=lambda k: int(k))[-1]
parts = segments[quality]
result = []
len_parts = len(parts)
part_id = part['k']
final_url = self._url_for_id(part_id, quality)
ext = (final_url.split('?')[0]).split('.')[-1]
- part_info = {'id': part_id,
- 'url': final_url,
- 'ext': ext,
- 'title': title,
- 'thumbnail': thumbnail_url,
- }
+ part_info = {
+ 'id': '%s' % part_id,
+ 'url': final_url,
+ 'ext': ext,
+ 'title': title,
+ 'thumbnail': thumbnail_url,
+ }
result.append(part_info)
return result
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
class TumblrIE(InfoExtractor):
- _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)($|/)'
+ _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
- 'description': 'md5:dfac39636969fe6bf1caa2d50405f069',
+ 'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
- 'title': '5SOS STRUM ;)',
+ 'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': 're:http://.*\.jpg',
}
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage = self._download_webpage(url, video_id)
- re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
- video = re.search(re_video, webpage)
- if video is None:
- raise ExtractorError('Unable to extract video')
- video_url = video.group('video_url')
- ext = video.group('ext')
-
- video_thumbnail = self._search_regex(
- r'posters.*?\[\\x22(.*?)\\x22',
- webpage, 'thumbnail', fatal=False) # We pick the first poster
- if video_thumbnail:
- video_thumbnail = video_thumbnail.replace('\\\\/', '/')
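+ # The post page only embeds an iframe player; fetch it to find the <source> URL.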
+ iframe_url = self._search_regex(
+ r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
+ webpage, 'iframe url')
+ iframe = self._download_webpage(iframe_url, video_id)
+ video_url = self._search_regex(r'<source src="([^"]+)"',
+ iframe, 'video url')
# The only place where you can get a title; it's not complete,
# but searching in other places doesn't work for all videos.
- video_title = self._html_search_regex(r'<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
- webpage, 'title', flags=re.DOTALL)
+ video_title = self._html_search_regex(
+ r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
+ webpage, 'title')
- return [{'id': video_id,
- 'url': video_url,
- 'title': video_title,
- 'description': self._html_search_meta('description', webpage),
- 'thumbnail': video_thumbnail,
- 'ext': ext
- }]
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': video_title,
+ 'description': self._og_search_description(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ qualities,
+ xpath_text,
+)
+
+
+class TurboIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
+ _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
+ _TEST = {
+ 'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
+ 'md5': '33f4b91099b36b5d5a91f84b5bcba600',
+ 'info_dict': {
+ 'id': '454443',
+ 'ext': 'mp4',
+ 'duration': 3715,
+ 'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+ 'description': 'Retrouvez dans cette rubrique toutes les vidéos de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ playlist = self._download_xml(self._API_URL.format(video_id), video_id)
+ item = playlist.find('./channel/item')
+ if item is None:
+ raise ExtractorError('Playlist item was not found', expected=True)
+
+ title = xpath_text(item, './title', 'title')
+ duration = int_or_none(xpath_text(item, './durate', 'duration'))
+ thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
+ description = self._og_search_description(webpage)
+
+ formats = []
+ get_quality = qualities(['3g', 'sd', 'hq'])
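+ # qualities() ranks by list position, so 'hq' outranks 'sd' and '3g'.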
+ for child in item:
+ m = re.search(r'url_video_(?P<quality>.+)', child.tag)
+ if m:
+ quality = m.group('quality')
+ formats.append({
+ 'format_id': quality,
+ 'url': child.text,
+ 'quality': get_quality(quality),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'duration': duration,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'formats': formats,
+ }
from .common import InfoExtractor
from ..utils import (
- unified_strdate,
- clean_html,
- int_or_none,
+ float_or_none,
+ str_to_int,
)
class TvigleIE(InfoExtractor):
IE_NAME = 'tvigle'
IE_DESC = 'Интернет-телевидение Tvigle.ru'
- _VALID_URL = r'http://(?:www\.)?tvigle\.ru/category/.+?[\?&]v(?:ideo)?=(?P<id>\d+)'
+ _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$'
_TESTS = [
{
- 'url': 'http://www.tvigle.ru/category/cinema/1608/?video=503081',
- 'md5': '09afba4616666249f087efc6dcf83cb3',
+ 'url': 'http://www.tvigle.ru/video/brat/',
+ 'md5': 'ff4344a4894b0524441fb6f8218dc716',
'info_dict': {
- 'id': '503081',
- 'ext': 'flv',
- 'title': 'Брат 2 ',
- 'description': 'md5:f5a42970f50648cee3d7ad740f3ae769',
- 'upload_date': '20110919',
+ 'id': '5118490',
+ 'display_id': 'brat',
+ 'ext': 'mp4',
+ 'title': 'Брат',
+ 'description': 'md5:d16ac7c0b47052ea51fddb92c4e413eb',
+ 'duration': 5722.6,
+ 'age_limit': 16,
},
},
{
- 'url': 'http://www.tvigle.ru/category/men/vysotskiy_vospominaniya02/?flt=196&v=676433',
- 'md5': 'e7efe5350dd5011d0de6550b53c3ba7b',
+ 'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
+ 'md5': 'd9012d7c7c598fe7a11d7fb46dc1f574',
'info_dict': {
- 'id': '676433',
- 'ext': 'flv',
+ 'id': '5142516',
+ 'ext': 'mp4',
'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
'description': 'md5:027f7dc872948f14c96d19b4178428a4',
- 'upload_date': '20121218',
+ 'duration': 186.080,
+ 'age_limit': 0,
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
- video_data = self._download_xml(
- 'http://www.tvigle.ru/xml/single.php?obj=%s' % video_id, video_id, 'Downloading video XML')
+ webpage = self._download_webpage(url, display_id)
- video = video_data.find('./video')
+ video_id = self._html_search_regex(
+ r'<li class="video-preview current_playing" id="(\d+)">', webpage, 'video id')
- title = video.get('name')
- description = video.get('anons')
- if description:
- description = clean_html(description)
- thumbnail = video_data.get('img')
- upload_date = unified_strdate(video.get('date'))
- like_count = int_or_none(video.get('vtp'))
+ video_data = self._download_json(
+ 'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id)
- formats = []
- for num, (format_id, format_note) in enumerate([['low_file', 'SQ'], ['file', 'HQ'], ['hd', 'HD 720']]):
- video_url = video.get(format_id)
- if not video_url:
- continue
- formats.append({
- 'url': video_url,
- 'format_id': format_id,
- 'format_note': format_note,
- 'quality': num,
- })
+ item = video_data['playlist']['items'][0]
+
+ title = item['title']
+ description = item['description']
+ thumbnail = item['thumbnail']
+ duration = float_or_none(item['durationMilliseconds'], 1000)
+ age_limit = str_to_int(item['ageRestrictions'])
+ formats = []
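+ # item['videos'] maps vcodec -> {quality label (e.g. '720p'): URL}.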
+ for vcodec, fmts in item['videos'].items():
+ for quality, video_url in fmts.items():
+ formats.append({
+ 'url': video_url,
+ 'format_id': '%s-%s' % (vcodec, quality),
+ 'vcodec': vcodec,
+ 'height': int(quality[:-1]),
+ 'filesize': item['video_files_size'][vcodec][quality],
+ })
self._sort_formats(formats)
return {
'id': video_id,
+ 'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'like_count': like_count,
- 'age_limit': 18,
+ 'duration': duration,
+ 'age_limit': age_limit,
'formats': formats,
}
\ No newline at end of file
from .common import InfoExtractor
from ..utils import (
ExtractorError,
+ compat_str,
parse_iso8601,
qualities,
)
class TVPlayIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?tvplay\.lv/parraides/[^/]+/(?P<id>\d+)'
+ IE_DESC = 'TV3Play and related services'
+ _VALID_URL = r'''(?x)http://(?:www\.)?
+ (?:tvplay\.lv/parraides|
+ tv3play\.lt/programos|
+ tv3play\.ee/sisu|
+ tv3play\.se/program|
+ tv6play\.se/program|
+ tv8play\.se/program|
+ tv10play\.se/program|
+ tv3play\.no/programmer|
+ viasat4play\.no/programmer|
+ tv6play\.no/programmer|
+ tv3play\.dk/programmer|
+ )/[^/]+/(?P<id>\d+)
+ '''
_TESTS = [
{
'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
'skip_download': True,
},
},
+ {
+ 'url': 'http://www.tv3play.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
+ 'info_dict': {
+ 'id': '409229',
+ 'ext': 'flv',
+ 'title': 'Moterys meluoja geriau',
+ 'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
+ 'duration': 1330,
+ 'timestamp': 1403769181,
+ 'upload_date': '20140626',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
+ 'info_dict': {
+ 'id': '238551',
+ 'ext': 'flv',
+ 'title': 'Kodu keset linna 398537',
+ 'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
+ 'duration': 1257,
+ 'timestamp': 1292449761,
+ 'upload_date': '20101215',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
+ 'info_dict': {
+ 'id': '395385',
+ 'ext': 'flv',
+ 'title': 'Husräddarna S02E07',
+ 'description': 'md5:f210c6c89f42d4fc39faa551be813777',
+ 'duration': 2574,
+ 'timestamp': 1400596321,
+ 'upload_date': '20140520',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
+ 'info_dict': {
+ 'id': '266636',
+ 'ext': 'flv',
+ 'title': 'Den sista dokusåpan S01E08',
+ 'description': 'md5:295be39c872520221b933830f660b110',
+ 'duration': 1492,
+ 'timestamp': 1330522854,
+ 'upload_date': '20120229',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
+ 'info_dict': {
+ 'id': '282756',
+ 'ext': 'flv',
+ 'title': 'Antikjakten S01E10',
+ 'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
+ 'duration': 2646,
+ 'timestamp': 1348575868,
+ 'upload_date': '20120925',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
+ 'info_dict': {
+ 'id': '230898',
+ 'ext': 'flv',
+ 'title': 'Anna Anka søker assistent - Ep. 8',
+ 'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
+ 'duration': 2656,
+ 'timestamp': 1277720005,
+ 'upload_date': '20100628',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
+ 'info_dict': {
+ 'id': '21873',
+ 'ext': 'flv',
+ 'title': 'Budbringerne program 10',
+ 'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
+ 'duration': 1297,
+ 'timestamp': 1254205102,
+ 'upload_date': '20090929',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
+ 'info_dict': {
+ 'id': '361883',
+ 'ext': 'flv',
+ 'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
+ 'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
+ 'duration': 2594,
+ 'timestamp': 1393236292,
+ 'upload_date': '20140224',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ },
]
def _real_extract(self, url):
quality = qualities(['hls', 'medium', 'high'])
formats = []
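+ # Some stream values are not plain URL strings (e.g. empty or nested data); skip those.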
for format_id, video_url in streams['streams'].items():
- if not video_url:
+ if not video_url or not isinstance(video_url, compat_str):
continue
fmt = {
'format_id': format_id,
--- /dev/null
+from __future__ import unicode_literals
+
+import itertools
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ parse_iso8601,
+)
+
+
+class TwitchIE(InfoExtractor):
+ # TODO: One broadcast may be split into multiple videos. The key
+ # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+ # starts at 1 and increases. Can we treat all parts as one video?
+ _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?twitch\.tv/
+ (?:
+ (?P<channelid>[^/]+)|
+ (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
+ (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
+ )
+ /?(?:\#.*)?$
+ """
+ _PAGE_LIMIT = 100
+ _API_BASE = 'https://api.twitch.tv'
+ _TESTS = [{
+ 'url': 'http://www.twitch.tv/riotgames/b/577357806',
+ 'info_dict': {
+ 'id': 'a577357806',
+ 'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
+ },
+ 'playlist_mincount': 12,
+ }, {
+ 'url': 'http://www.twitch.tv/acracingleague/c/5285812',
+ 'info_dict': {
+ 'id': 'c5285812',
+ 'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
+ },
+ 'playlist_mincount': 3,
+ }, {
+ 'url': 'http://www.twitch.tv/vanillatv',
+ 'info_dict': {
+ 'id': 'vanillatv',
+ 'title': 'VanillaTV',
+ },
+ 'playlist_mincount': 412,
+ }]
+
+ def _handle_error(self, response):
+ if not isinstance(response, dict):
+ return
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
+ expected=True)
+
+ def _download_json(self, url, video_id, note='Downloading JSON metadata'):
+ response = super(TwitchIE, self)._download_json(url, video_id, note)
+ self._handle_error(response)
+ return response
+
+ def _extract_media(self, item, item_id):
+ ITEMS = {
+ 'a': 'video',
+ 'c': 'chapter',
+ }
+ info = self._extract_info(self._download_json(
+ '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+ 'Downloading %s info JSON' % ITEMS[item]))
+ response = self._download_json(
+ '%s/api/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
+ 'Downloading %s playlist JSON' % ITEMS[item])
+ entries = []
+ chunks = response['chunks']
+ qualities = list(chunks.keys())
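+ # chunks maps quality name -> fragment list; zipping walks the lists in
+ # lockstep so every part gets one format per available quality.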
+ for num, fragment in enumerate(zip(*chunks.values()), start=1):
+ formats = []
+ for fmt_num, fragment_fmt in enumerate(fragment):
+ format_id = qualities[fmt_num]
+ fmt = {
+ 'url': fragment_fmt['url'],
+ 'format_id': format_id,
+ 'quality': 1 if format_id == 'live' else 0,
+ }
+ m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
+ if m:
+ fmt['height'] = int(m.group('height'))
+ formats.append(fmt)
+ self._sort_formats(formats)
+ entry = dict(info)
+ entry['id'] = '%s_%d' % (entry['id'], num)
+ entry['title'] = '%s part %d' % (entry['title'], num)
+ entry['formats'] = formats
+ entries.append(entry)
+ return self.playlist_result(entries, info['id'], info['title'])
+
+ def _extract_info(self, info):
+ return {
+ 'id': info['_id'],
+ 'title': info['title'],
+ 'description': info['description'],
+ 'duration': info['length'],
+ 'thumbnail': info['preview'],
+ 'uploader': info['channel']['display_name'],
+ 'uploader_id': info['channel']['name'],
+ 'timestamp': parse_iso8601(info['recorded_at']),
+ 'view_count': info['views'],
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj.group('chapterid'):
+ return self._extract_media('c', mobj.group('chapterid'))
+
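+ # Unreachable reference copy of the older chapter extraction, kept in the
+ # string literal below.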
+ """
+ webpage = self._download_webpage(url, chapter_id)
+ m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
+ if not m:
+ raise ExtractorError('Cannot find archive of a chapter')
+ archive_id = m.group(1)
+
+ api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+ doc = self._download_xml(
+ api, chapter_id,
+ note='Downloading chapter information',
+ errnote='Chapter information download failed')
+ for a in doc.findall('.//archive'):
+ if archive_id == a.find('./id').text:
+ break
+ else:
+ raise ExtractorError('Could not find chapter in chapter information')
+
+ video_url = a.find('./video_file_url').text
+ video_ext = video_url.rpartition('.')[2] or 'flv'
+
+ chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
+ chapter_info = self._download_json(
+ chapter_api_url, 'c' + chapter_id,
+ note='Downloading chapter metadata',
+ errnote='Download of chapter metadata failed')
+
+ bracket_start = int(doc.find('.//bracket_start').text)
+ bracket_end = int(doc.find('.//bracket_end').text)
+
+ # TODO determine start (and probably fix up file)
+ # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+ #video_url += '?start=' + TODO:start_timestamp
+ # bracket_start is 13290, but we want 51670615
+ self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
+ 'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
+
+ info = {
+ 'id': 'c' + chapter_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': chapter_info['title'],
+ 'thumbnail': chapter_info['preview'],
+ 'description': chapter_info['description'],
+ 'uploader': chapter_info['channel']['display_name'],
+ 'uploader_id': chapter_info['channel']['name'],
+ }
+ return info
+ """
+ elif mobj.group('videoid'):
+ return self._extract_media('a', mobj.group('videoid'))
+ elif mobj.group('channelid'):
+ channel_id = mobj.group('channelid')
+ info = self._download_json(
+ '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
+ channel_id, 'Downloading channel info JSON')
+ channel_name = info.get('display_name') or info.get('name')
+ entries = []
+ offset = 0
+ limit = self._PAGE_LIMIT
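+ # Page through the channel archive, _PAGE_LIMIT videos at a time, until a page comes back empty.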
+ for counter in itertools.count(1):
+ response = self._download_json(
+ '%s/kraken/channels/%s/videos/?offset=%d&limit=%d'
+ % (self._API_BASE, channel_id, offset, limit),
+ channel_id, 'Downloading channel videos JSON page %d' % counter)
+ videos = response['videos']
+ if not videos:
+ break
+ entries.extend([self.url_result(video['url'], 'Twitch') for video in videos])
+ offset += limit
+ return self.playlist_result(entries, channel_id, channel_name)
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
+from ..utils import qualities
+
class UnistraIE(InfoExtractor):
- _VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(\d+)'
-
- _TEST = {
- u'url': u'http://utv.unistra.fr/video.php?id_video=154',
- u'file': u'154.mp4',
- u'md5': u'736f605cfdc96724d55bb543ab3ced24',
- u'info_dict': {
- u'title': u'M!ss Yella',
- u'description': u'md5:104892c71bd48e55d70b902736b81bbf',
+ _VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
+
+ _TESTS = [
+ {
+ 'url': 'http://utv.unistra.fr/video.php?id_video=154',
+ 'md5': '736f605cfdc96724d55bb543ab3ced24',
+ 'info_dict': {
+ 'id': '154',
+ 'ext': 'mp4',
+ 'title': 'M!ss Yella',
+ 'description': 'md5:104892c71bd48e55d70b902736b81bbf',
+ },
},
- }
+ {
+ 'url': 'http://utv.unistra.fr/index.php?id_video=437',
+ 'md5': '1ddddd6cccaae76f622ce29b8779636d',
+ 'info_dict': {
+ 'id': '437',
+ 'ext': 'mp4',
+ 'title': 'Prix Louise Weiss 2014',
+ 'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
+ },
+ }
+ ]
def _real_extract(self, url):
- id = re.match(self._VALID_URL, url).group(1)
- webpage = self._download_webpage(url, id)
- file = re.search(r'file: "(.*?)",', webpage).group(1)
- title = self._html_search_regex(r'<title>UTV - (.*?)</', webpage, u'title')
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
- video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file
+ webpage = self._download_webpage(url, video_id)
- return {'id': id,
- 'title': title,
- 'ext': 'mp4',
- 'url': video_url,
- 'description': self._html_search_regex(r'<meta name="Description" content="(.*?)"', webpage, u'description', flags=re.DOTALL),
- 'thumbnail': self._search_regex(r'image: "(.*?)"', webpage, u'thumbnail'),
- }
+ files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
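+ # Both SD and HD variants appear as file: "..." entries; the set drops duplicates.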
+
+ quality = qualities(['SD', 'HD'])
+ formats = []
+ for file_path in files:
+ format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
+ formats.append({
+ 'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
+ 'format_id': format_id,
+ 'quality': quality(format_id)
+ })
+
+ title = self._html_search_regex(
+ r'<title>UTV - (.*?)</', webpage, 'title')
+ description = self._html_search_regex(
+ r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
+ thumbnail = self._search_regex(
+ r'image: "(.*?)"', webpage, 'thumbnail')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'formats': formats
+ }
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
class UstreamChannelIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
IE_NAME = 'ustream:channel'
+ _TEST = {
+ 'url': 'http://www.ustream.tv/channel/channeljapan',
+ 'info_dict': {
+ 'id': '10874166',
+ },
+ 'playlist_mincount': 17,
+ }
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- slug = m.group('slug')
- webpage = self._download_webpage(url, slug)
+ display_id = m.group('slug')
+ webpage = self._download_webpage(url, display_id)
channel_id = get_meta_content('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
video_ids = []
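+ # Follow the AJAX pagination via 'nextUrl' until the API stops returning one.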
while next_url:
- reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id))
+ reply = self._download_json(
+ compat_urlparse.urljoin(BASE, next_url), display_id,
+ note='Downloading video information (next: %d)' % (len(video_ids) + 1))
video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
next_url = reply['nextUrl']
- urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
- url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
- return self.playlist_result(url_entries, channel_id)
+ entries = [
+ self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
+ for vid in video_ids]
+ return {
+ '_type': 'playlist',
+ 'id': channel_id,
+ 'display_id': display_id,
+ 'entries': entries,
+ }
'md5': '99f65c0c9ef9b682b97313e052734c3f',
'info_dict': {
'id': '249bb972c2',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Смях! Чудо - чист за секунди - Скрита камера',
},
}
return {
'id': video_id,
'url': final_url,
- 'ext': 'flv',
'title': title,
'thumbnail': thumbnail_url,
}
_TEST = {
'url': 'http://veehd.com/video/4686958',
- 'file': '4686958.mp4',
'info_dict': {
+ 'id': '4686958',
+ 'ext': 'mp4',
'title': 'Time Lapse View from Space ( ISS)',
'uploader_id': 'spotted',
'description': 'md5:f0094c4cf3a72e22bc4e4239ef767ad7',
from .common import InfoExtractor
from ..utils import (
- compat_HTTPError,
+ compat_urllib_request,
ExtractorError,
)
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
- "md5": "06bea460acb744eab74a9d7dcb4bfd61",
+ "md5": "95ee28ee45e70130e3ab02b0f579ae23",
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
- 'md5': '893ec0e0d4426a1d96c01de8f2bdff58',
+ 'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
+ def _real_initialize(self):
+ req = compat_urllib_request.Request(
+ 'http://www.vevo.com/auth', data=b'')
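+ # data=b'' makes urllib send a POST (seemingly required to obtain the token).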
+ webpage = self._download_webpage(
+ req, None,
+ note='Retrieving oauth token',
+ errnote='Unable to retrieve oauth token',
+ fatal=False)
+ if webpage is False:
+ self._oauth_token = None
+ else:
+ self._oauth_token = self._search_regex(
+ r'access_token":\s*"([^"]+)"',
+ webpage, 'access token', fatal=False)
+
def _formats_from_json(self, video_info):
last_version = {'version': -1}
for version in video_info['videoVersions']:
})
return formats
+ def _download_api_formats(self, video_id):
+ if not self._oauth_token:
+ self._downloader.report_warning(
+ 'No oauth token available, skipping API HLS download')
+ return []
+
+ api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
+ video_id, self._oauth_token)
+ api_data = self._download_json(
+ api_url, video_id,
+ note='Downloading HLS formats',
+ errnote='Failed to download HLS format list', fatal=False)
+ if api_data is None:
+ return []
+
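+ # The response is a list of stream descriptors; the first entry holds the m3u8 URL.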
+ m3u8_url = api_data[0]['url']
+ return self._extract_m3u8_formats(
+ m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
+ preference=0)
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
else:
age_limit = None
- # Download SMIL
- smil_blocks = sorted((
- f for f in video_info['videoVersions']
- if f['sourceType'] == 13),
- key=lambda f: f['version'])
-
- smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
- self._SMIL_BASE_URL, video_id, video_id.lower())
- if smil_blocks:
- smil_url_m = self._search_regex(
- r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
- fatal=False)
- if smil_url_m is not None:
- smil_url = smil_url_m
-
- try:
- smil_xml = self._download_webpage(smil_url, video_id,
- 'Downloading SMIL info')
- formats.extend(self._formats_from_smil(smil_xml))
- except ExtractorError as ee:
- if not isinstance(ee.cause, compat_HTTPError):
- raise
- self._downloader.report_warning(
- 'Cannot download SMIL information, falling back to JSON ..')
+ # Download via HLS API
+ formats.extend(self._download_api_formats(video_id))
self._sort_formats(formats)
timestamp_ms = int(self._search_regex(
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class VGTVIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?vgtv\.no/#!/(?:.*)/(?P<id>[0-9]+)'
+ _TESTS = [
+ {
+ # streamType: vod
+ 'url': 'http://www.vgtv.no/#!/video/84196/hevnen-er-soet-episode-10-abu',
+ 'md5': 'b8be7a234cebb840c0d512c78013e02f',
+ 'info_dict': {
+ 'id': '84196',
+ 'ext': 'mp4',
+ 'title': 'Hevnen er søt episode 1:10 - Abu',
+ 'description': 'md5:e25e4badb5f544b04341e14abdc72234',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 648.000,
+ 'timestamp': 1404626400,
+ 'upload_date': '20140706',
+ 'view_count': int,
+ },
+ },
+ {
+ # streamType: wasLive
+ 'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
+ 'info_dict': {
+ 'id': '100764',
+ 'ext': 'flv',
+ 'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
+ 'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 9056.000,
+ 'timestamp': 1410113864,
+ 'upload_date': '20140907',
+ 'view_count': int,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ },
+ {
+ # streamType: live
+ 'url': 'http://www.vgtv.no/#!/live/100015/direkte-her-kan-du-se-laksen-live-fra-suldalslaagen',
+ 'info_dict': {
+ 'id': '100015',
+ 'ext': 'flv',
+ 'title': 'DIREKTE: Her kan du se laksen live fra Suldalslågen!',
+ 'description': 'md5:9a60cc23fa349f761628924e56eeec2d',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 0,
+ 'timestamp': 1407423348,
+ 'upload_date': '20140807',
+ 'view_count': int,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ },
+ ]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ data = self._download_json(
+ 'http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website' % video_id,
+ video_id, 'Downloading media JSON')
+
+ streams = data['streamUrls']
+
+ formats = []
+
+ hls_url = streams.get('hls')
+ if hls_url:
+ formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4'))
+
+ hds_url = streams.get('hds')
+ if hds_url:
+ formats.extend(self._extract_f4m_formats(hds_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', video_id))
+
+ mp4_url = streams.get('mp4')
+ if mp4_url:
+ _url = hls_url or hds_url
+ MP4_URL_TEMPLATE = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1])
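+ # The HLS/HDS URL embeds a comma-separated list of width_height_vbr
+ # variants; reuse those to derive the matching progressive MP4 URLs.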
+ for mp4_format in _url.split(','):
+ m = re.search(r'(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', mp4_format)
+ if not m:
+ continue
+ width = int(m.group('width'))
+ height = int(m.group('height'))
+ vbr = int(m.group('vbr'))
+ formats.append({
+ 'url': MP4_URL_TEMPLATE % mp4_format,
+ 'format_id': 'mp4-%s' % vbr,
+ 'width': width,
+ 'height': height,
+ 'vbr': vbr,
+ 'preference': 1,
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': data['title'],
+ 'description': data['description'],
+ 'thumbnail': data['images']['main'] + '?t[]=900x506q80',
+ 'timestamp': data['published'],
+ 'duration': float_or_none(data['duration'], 1000),
+ 'view_count': data['displays'],
+ 'formats': formats,
+ }
\ No newline at end of file
-import json
-import re
+from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import (
+ float_or_none,
+ int_or_none,
+)
class ViddlerIE(InfoExtractor):
- _VALID_URL = r'(?P<domain>https?://(?:www\.)?viddler\.com)/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
+ _VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
_TEST = {
- u"url": u"http://www.viddler.com/v/43903784",
- u'file': u'43903784.mp4',
- u'md5': u'fbbaedf7813e514eb7ca30410f439ac9',
- u'info_dict': {
- u"title": u"Video Made Easy",
- u"uploader": u"viddler",
- u"duration": 100.89,
+ "url": "http://www.viddler.com/v/43903784",
+ 'md5': 'ae43ad7cb59431ce043f0ff7fa13cbf4',
+ 'info_dict': {
+ 'id': '43903784',
+ 'ext': 'mp4',
+ "title": "Video Made Easy",
+ 'description': 'You don\'t need to be a professional to make high-quality video content. Viddler provides some quick and easy tips on how to produce great video content with limited resources. ',
+ "uploader": "viddler",
+ 'timestamp': 1335371429,
+ 'upload_date': '20120425',
+ "duration": 100.89,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'view_count': int,
+ 'categories': ['video content', 'high quality video', 'video made easy', 'how to produce video with limited resources', 'viddler'],
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- embed_url = mobj.group('domain') + u'/embed/' + video_id
- webpage = self._download_webpage(embed_url, video_id)
-
- video_sources_code = self._search_regex(
- r"(?ms)sources\s*:\s*(\{.*?\})", webpage, u'video URLs')
- video_sources = json.loads(video_sources_code.replace("'", '"'))
-
- formats = [{
- 'url': video_url,
- 'format': format_id,
- } for video_url, format_id in video_sources.items()]
-
- title = self._html_search_regex(
- r"title\s*:\s*'([^']*)'", webpage, u'title')
- uploader = self._html_search_regex(
- r"authorName\s*:\s*'([^']*)'", webpage, u'uploader', fatal=False)
- duration_s = self._html_search_regex(
- r"duration\s*:\s*([0-9.]*)", webpage, u'duration', fatal=False)
- duration = float(duration_s) if duration_s else None
- thumbnail = self._html_search_regex(
- r"thumbnail\s*:\s*'([^']*)'",
- webpage, u'thumbnail', fatal=False)
+ video_id = self._match_id(url)
+
+ json_url = (
+ 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
+ video_id)
+ data = self._download_json(json_url, video_id)['video']
+
+ formats = []
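+ # Each file may exist as a direct URL, a CDN URL and an HTML5 source;
+ # source_preference ranks them CDN (1) > HTML5 (0) > direct (-1).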
+ for filed in data['files']:
+ if filed.get('status', 'ready') != 'ready':
+ continue
+ f = {
+ 'format_id': filed['profile_id'],
+ 'format_note': filed['profile_name'],
+ 'url': self._proto_relative_url(filed['url']),
+ 'width': int_or_none(filed.get('width')),
+ 'height': int_or_none(filed.get('height')),
+ 'filesize': int_or_none(filed.get('size')),
+ 'ext': filed.get('ext'),
+ 'source_preference': -1,
+ }
+ formats.append(f)
+
+ if filed.get('cdn_url'):
+ f = f.copy()
+ f['url'] = self._proto_relative_url(filed['cdn_url'])
+ f['format_id'] = filed['profile_id'] + '-cdn'
+ f['source_preference'] = 1
+ formats.append(f)
+
+ if filed.get('html5_video_source'):
+ f = f.copy()
+ f['url'] = self._proto_relative_url(
+ filed['html5_video_source'])
+ f['format_id'] = filed['profile_id'] + '-html5'
+ f['source_preference'] = 0
+ formats.append(f)
+ self._sort_formats(formats)
+
+ categories = [
+ t.get('text') for t in data.get('tags', []) if 'text' in t]
return {
'_type': 'video',
'id': video_id,
- 'title': title,
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'duration': duration,
+ 'title': data['title'],
'formats': formats,
+ 'description': data.get('description'),
+ 'timestamp': int_or_none(data.get('upload_time')),
+ 'thumbnail': self._proto_relative_url(data.get('thumbnail_url')),
+ 'uploader': data.get('author'),
+ 'duration': float_or_none(data.get('length')),
+ 'view_count': int_or_none(data.get('view_count')),
+ 'categories': categories,
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ remove_start,
+)
+
+
+class VideoMegaIE(InfoExtractor):
+ _VALID_URL = r'''(?x)https?://
+ (?:www\.)?videomega\.tv/
+ (?:iframe\.php)?\?ref=(?P<id>[A-Za-z0-9]+)
+ '''
+ _TEST = {
+ 'url': 'http://videomega.tv/?ref=GKeGPVedBe',
+ 'md5': '240fb5bcf9199961f48eb17839b084d6',
+ 'info_dict': {
+ 'id': 'GKeGPVedBe',
+ 'ext': 'mp4',
+ 'title': 'XXL - All Sports United',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
+ webpage = self._download_webpage(url, video_id)
+
+ escaped_data = self._search_regex(
+ r'unescape\("([^"]+)"\)', webpage, 'escaped data')
+ playlist = compat_urllib_parse.unquote(escaped_data)
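+ # The player config is URL-escaped inside a JS unescape("...") call.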
+
+ thumbnail = self._search_regex(
+ r'image:\s*"([^"]+)"', playlist, 'thumbnail', fatal=False)
+ url = self._search_regex(r'file:\s*"([^"]+)"', playlist, 'URL')
+ title = remove_start(self._html_search_regex(
+ r'<title>(.*?)</title>', webpage, 'title'), 'VideoMega.tv - ')
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ }]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class VidziIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?vidzi\.tv/(?P<id>\w+)'
+ _TEST = {
+ 'url': 'http://vidzi.tv/cghql9yq6emu.html',
+ 'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',
+ 'info_dict': {
+ 'id': 'cghql9yq6emu',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video 1\\\\2\'3/4<5\\\\6ä7↭',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+ video_url = self._html_search_regex(
+ r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
+ title = self._html_search_regex(
+ r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': video_url,
+ }
+
\ No newline at end of file
compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
- clean_html,
- get_element_by_attribute,
+ compat_urlparse,
ExtractorError,
+ InAdvancePagedList,
+ int_or_none,
RegexNotFoundError,
std_headers,
unsmuggle_url,
urlencode_postdata,
- int_or_none,
)
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
- (?P<proto>(?:https?:)?//)?
+ https?://
(?:(?:www|(?P<player>player))\.)?
vimeo(?P<pro>pro)?\.com/
+ (?!channels/[^/?#]+/?(?:$|[?#])|album/)
(?:.*?/)?
(?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
(?:videos?/)?
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
+ 'description': 'md5:380943ec71b89736ff4bf27183233d09',
'duration': 1595,
},
},
'uploader': 'The BLN & Business of Software',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
+ 'description': None,
},
},
{
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
+ 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
},
'params': {
'videopassword': 'youtube-dl',
'duration': 62,
}
},
+ {
+ # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
+ 'url': 'https://player.vimeo.com/video/98044508',
+ 'note': 'The js code contains assignments to the same variable as the config',
+ 'info_dict': {
+ 'id': '98044508',
+ 'ext': 'mp4',
+ 'title': 'Pier Solar OUYA Official Trailer',
+ 'uploader': 'Tulio Gonçalves',
+ 'uploader_id': 'user28849593',
+ },
+ },
]
- @classmethod
- def suitable(cls, url):
- if VimeoChannelIE.suitable(url):
- # Otherwise channel urls like http://vimeo.com/channels/31259 would
- # match
- return False
- else:
- return super(VimeoIE, cls).suitable(url)
-
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword', None)
if password is None:
if data is not None:
headers = headers.copy()
headers.update(data)
+ if 'Referer' not in headers:
+ headers['Referer'] = url
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
+ orig_url = url
if mobj.group('pro') or mobj.group('player'):
url = 'http://player.vimeo.com/video/' + video_id
# We try to find out to which variable is assigned the config dic
m_variable_name = re.search('(\w)\.video\.id', webpage)
if m_variable_name is not None:
- config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
+ config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
else:
config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
config = self._search_regex(config_re, webpage, 'info section',
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
# Extract video description
- video_description = None
- try:
- video_description = get_element_by_attribute("class", "description_wrapper", webpage)
- if video_description:
- video_description = clean_html(video_description)
- except AssertionError as err:
- # On some pages like (http://player.vimeo.com/video/54469442) the
- # html tags are not closed, python 2.6 cannot handle it
- if err.args[0] == 'we should not get here!':
- pass
- else:
- raise
+
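+ # Try the description <div> first, then the meta tag; Vimeo Pro links
+ # need the original (non-player) page for it.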
+ video_description = self._html_search_regex(
+ r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
+ webpage, 'description', default=None)
+ if not video_description:
+ video_description = self._html_search_meta(
+ 'description', webpage, default=None)
+ if not video_description and mobj.group('pro'):
+ orig_webpage = self._download_webpage(
+ orig_url, video_id,
+ note='Downloading webpage for description',
+ fatal=False)
+ if orig_webpage:
+ video_description = self._html_search_meta(
+ 'description', orig_webpage, default=None)
+ if not video_description and not mobj.group('player'):
+ self._downloader.report_warning('Cannot find video description')
# Extract video duration
video_duration = int_or_none(config["video"].get("duration"))
class VimeoChannelIE(InfoExtractor):
IE_NAME = 'vimeo:channel'
- _VALID_URL = r'(?:https?://)?vimeo\.com/channels/(?P<id>[^/]+)/?(\?.*)?$'
+ _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
+ _TESTS = [{
+ 'url': 'http://vimeo.com/channels/tributes',
+ 'info_dict': {
+ 'title': 'Vimeo Tributes',
+ },
+ 'playlist_mincount': 25,
+ }]
def _page_url(self, base_url, pagenum):
return '%s/videos/page:%d/' % (base_url, pagenum)
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
- _VALID_URL = r'(?:https?://)?vimeo\.com/(?P<name>[^/]+)(?:/videos|[#?]|$)'
+ _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
-
- @classmethod
- def suitable(cls, url):
- if VimeoChannelIE.suitable(url) or VimeoIE.suitable(url) or VimeoAlbumIE.suitable(url) or VimeoGroupsIE.suitable(url):
- return False
- return super(VimeoUserIE, cls).suitable(url)
+ _TESTS = [{
+ 'url': 'http://vimeo.com/nkistudio/videos',
+ 'info_dict': {
+ 'title': 'Nki',
+ },
+ 'playlist_mincount': 66,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
class VimeoAlbumIE(VimeoChannelIE):
IE_NAME = 'vimeo:album'
- _VALID_URL = r'(?:https?://)?vimeo\.com/album/(?P<id>\d+)'
+ _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
_TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
+ _TESTS = [{
+ 'url': 'http://vimeo.com/album/2632481',
+ 'info_dict': {
+ 'title': 'Staff Favorites: November 2013',
+ },
+ 'playlist_mincount': 13,
+ }]
def _page_url(self, base_url, pagenum):
return '%s/page:%d/' % (base_url, pagenum)
class VimeoGroupsIE(VimeoAlbumIE):
IE_NAME = 'vimeo:group'
_VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
+ _TESTS = [{
+ 'url': 'http://vimeo.com/groups/rolexawards',
+ 'info_dict': {
+ 'title': 'Rolex Awards for Enterprise',
+ },
+ 'playlist_mincount': 73,
+ }]
def _extract_list_title(self, webpage):
return self._og_search_title(webpage)
class VimeoReviewIE(InfoExtractor):
IE_NAME = 'vimeo:review'
IE_DESC = 'Review pages on vimeo'
- _VALID_URL = r'(?:https?://)?vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
- _TEST = {
+ _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
+ _TESTS = [{
'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
'file': '75524534.mp4',
'md5': 'c507a72f780cacc12b2248bb4006d253',
'title': "DICK HARDWICK 'Comedian'",
'uploader': 'Richard Hardwick',
}
- }
+ }, {
+ 'note': 'video player needs Referer',
+ 'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
+ 'md5': '6295fdab8f4bf6a002d058b2c6dce276',
+ 'info_dict': {
+ 'id': '91613211',
+ 'ext': 'mp4',
+ 'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
+ 'uploader': 'DevWeek Events',
+ 'duration': 2773,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
_VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
_LOGIN_REQUIRED = True
_TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
+ _TESTS = [{
+ 'url': 'http://vimeo.com/home/watchlater',
+ 'only_matching': True,
+ }]
def _real_initialize(self):
self._login()
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
+
+
+class VimeoLikesIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
+ IE_NAME = 'vimeo:likes'
+ IE_DESC = 'Vimeo user likes'
+ _TEST = {
+ 'url': 'https://vimeo.com/user755559/likes/',
+ 'playlist_mincount': 293,
+ "info_dict": {
+ "description": "See all the videos urza likes",
+ "title": 'Videos urza likes',
+ },
+ }
+
+ def _real_extract(self, url):
+ user_id = self._match_id(url)
+ webpage = self._download_webpage(url, user_id)
+ page_count = self._int(
+ self._search_regex(
+ r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
+ .*?</a></li>\s*<li\s+class="pagination_next">
+ ''', webpage, 'page count'),
+ 'page count', fatal=True)
+ PAGE_SIZE = 12
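+ # Likes listings hold 12 videos per page; InAdvancePagedList fetches
+ # pages lazily once the total page count is known.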
+ title = self._html_search_regex(
+ r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
+ description = self._html_search_meta('description', webpage)
+
+ def _get_page(idx):
+ page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
+ self.http_scheme(), user_id, idx + 1)
+ webpage = self._download_webpage(
+ page_url, user_id,
+ note='Downloading page %d/%d' % (idx + 1, page_count))
+ video_list = self._search_regex(
+ r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
+ webpage, 'video content')
+ paths = re.findall(
+ r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
+ for path in paths:
+ yield {
+ '_type': 'url',
+ 'url': compat_urlparse.urljoin(page_url, path),
+ }
+
+ pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
+
+ return {
+ '_type': 'playlist',
+ 'id': 'user%s_likes' % user_id,
+ 'title': title,
+ 'description': description,
+ 'entries': pl,
+ }
IE_NAME = 'vine:user'
_VALID_URL = r'(?:https?://)?vine\.co/(?P<user>[^/]+)/?(\?.*)?$'
_VINE_BASE_URL = "https://vine.co/"
+ _TEST = {
+ 'url': 'https://vine.co/Visa',
+ 'info_dict': {
+ 'id': 'Visa',
+ },
+ 'playlist_mincount': 46,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
info_page = self._download_webpage(info_url, video_id)
- if re.search(r'<!>Please log in or <', info_page):
- raise ExtractorError('This video is only available for registered users, '
- 'use --username and --password options to provide account credentials.', expected=True)
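+ # Known failure pages, mapped to readable error messages.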
+ ERRORS = {
+ r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
+ 'Video %s has been removed from public access due to rightholder complaint.',
+ r'<!>Please log in or <':
+ 'Video %s is only available for registered users, '
+ 'use --username and --password options to provide account credentials.',
+ '<!>Unknown error':
+ 'Video %s does not exist.'
+ }
+
+ for error_re, error_msg in ERRORS.items():
+ if re.search(error_re, info_page):
+ raise ExtractorError(error_msg % video_id, expected=True)
m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
if m_yt is not None:
req, video_id, 'Downloading video page')
title = self._search_regex(
- r'id="file_title".*?>\s*(.*?)\s*<span', webpage, 'title')
+ r'id="file_title".*?>\s*(.*?)\s*<(?:br|span)', webpage, 'title')
thumbnail = self._search_regex(
r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
url = self._search_regex(
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ str_to_int,
+)
+
+
+class VpornIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?vporn\.com/[^/]+/(?P<display_id>[^/]+)/(?P<id>\d+)'
+ _TESTS = [
+ {
+ 'url': 'http://www.vporn.com/masturbation/violet-on-her-th-birthday/497944/',
+ 'md5': 'facf37c1b86546fa0208058546842c55',
+ 'info_dict': {
+ 'id': '497944',
+ 'display_id': 'violet-on-her-th-birthday',
+ 'ext': 'mp4',
+ 'title': 'Violet on her 19th birthday',
+ 'description': 'Violet dances in front of the camera which is sure to get you horny.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'kileyGrope',
+ 'categories': ['Masturbation', 'Teen'],
+ 'duration': 393,
+ 'age_limit': 18,
+ 'view_count': int,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ }
+ },
+ {
+ 'url': 'http://www.vporn.com/female/hana-shower/523564/',
+ 'md5': 'ced35a4656198a1664cf2cda1575a25f',
+ 'info_dict': {
+ 'id': '523564',
+ 'display_id': 'hana-shower',
+ 'ext': 'mp4',
+ 'title': 'Hana Shower',
+ 'description': 'Hana showers at the bathroom.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Hmmmmm',
+ 'categories': ['Big Boobs', 'Erotic', 'Teen', 'Female'],
+ 'duration': 588,
+ 'age_limit': 18,
+ 'view_count': int,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._html_search_regex(
+ r'videoname\s*=\s*\'([^\']+)\'', webpage, 'title').strip()
+ description = self._html_search_regex(
+ r'<div class="description_txt">(.*?)</div>', webpage, 'description', fatal=False)
+ thumbnail = self._html_search_regex(
+ r'flashvars\.imageUrl\s*=\s*"([^"]+)"', webpage, 'thumbnail', fatal=False, default=None)
+ if thumbnail:
+ thumbnail = 'http://www.vporn.com' + thumbnail
+
+ uploader = self._html_search_regex(
+ r'(?s)UPLOADED BY.*?<a href="/user/[^"]+">([^<]+)</a>',
+ webpage, 'uploader', fatal=False)
+
+ categories = re.findall(r'<a href="/cat/[^"]+">([^<]+)</a>', webpage)
+
+ duration = parse_duration(self._search_regex(
+ r'duration (\d+ min \d+ sec)', webpage, 'duration', fatal=False))
+
+ view_count = str_to_int(self._html_search_regex(
+ r'<span>([\d,\.]+) VIEWS</span>', webpage, 'view count', fatal=False))
+ like_count = str_to_int(self._html_search_regex(
+ r'<span id="like" class="n">([\d,\.]+)</span>', webpage, 'like count', fatal=False))
+ dislike_count = str_to_int(self._html_search_regex(
+ r'<span id="dislike" class="n">([\d,\.]+)</span>', webpage, 'dislike count', fatal=False))
+ comment_count = str_to_int(self._html_search_regex(
+ r'<h4>Comments \(<b>([\d,\.]+)</b>\)</h4>', webpage, 'comment count', fatal=False))
+
+ formats = []
+
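+ # flashvars.videoUrl<suffix> = "..." entries; the captured suffix doubles as format_id.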
+ for video in re.findall(r'flashvars\.videoUrl([^=]+?)\s*=\s*"(https?://[^"]+)"', webpage):
+ video_url = video[1]
+ fmt = {
+ 'url': video_url,
+ 'format_id': video[0],
+ }
+ m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)_(?P<vbr>\d+)k\.mp4$', video_url)
+ if m:
+ fmt.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ 'vbr': int(m.group('vbr')),
+ })
+ formats.append(fmt)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'categories': categories,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'like_count': like_count,
+ 'dislike_count': dislike_count,
+ 'comment_count': comment_count,
+ 'age_limit': 18,
+ 'formats': formats,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class VRTIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:deredactie|sporza|cobra)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
+ _TESTS = [
+ # deredactie.be
+ {
+ 'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
+ 'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
+ 'info_dict': {
+ 'id': '2129880',
+ 'ext': 'flv',
+ 'title': 'Het journaal L - 25/10/14',
+ 'description': None,
+ 'timestamp': 1414271750.949,
+ 'upload_date': '20141025',
+ 'duration': 929,
+ }
+ },
+ # sporza.be
+ {
+ 'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
+ 'md5': '11f53088da9bf8e7cfc42456697953ff',
+ 'info_dict': {
+ 'id': '2124639',
+ 'ext': 'flv',
+ 'title': 'Bekijk Extra Time van 20 oktober',
+ 'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
+ 'timestamp': 1413835980.560,
+ 'upload_date': '20141020',
+ 'duration': 3238,
+ }
+ },
+ # cobra.be
+ {
+ 'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
+ 'md5': '78a2b060a5083c4f055449a72477409d',
+ 'info_dict': {
+ 'id': '2126050',
+ 'ext': 'flv',
+ 'title': 'Bret Easton Ellis in Café Corsari',
+ 'description': 'md5:f699986e823f32fd6036c1855a724ee9',
+ 'timestamp': 1413967500.494,
+ 'upload_date': '20141022',
+ 'duration': 661,
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_id = self._search_regex(
+ r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
+
+ formats = []
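+ # The page advertises an iPhone (HLS) source and a Flash (HDS) manifest via data-* attributes.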
+ mobj = re.search(
+ r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
+ webpage)
+ if mobj:
+ formats.extend(self._extract_m3u8_formats(
+ '%s/%s' % (mobj.group('server'), mobj.group('path')),
+ video_id, 'mp4'))
+ mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
+ if mobj:
+ formats.extend(self._extract_f4m_formats(
+ '%s/manifest.f4m' % mobj.group('src'), video_id))
+ self._sort_formats(formats)
+
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage, default=None)
+ thumbnail = self._og_search_thumbnail(webpage)
+ timestamp = float_or_none(self._search_regex(
+ r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
+ duration = float_or_none(self._search_regex(
+ r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ }
\ No newline at end of file
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ compat_str,
+ ExtractorError,
+)
class VubeIE(InfoExtractor):
_TESTS = [
{
+ 'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
+ 'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
+ 'info_dict': {
+ 'id': 'Y8NUZ69Tf7',
+ 'ext': 'mp4',
+ 'title': 'Best Drummer Ever [HD]',
+ 'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'William',
+ 'timestamp': 1406876915,
+ 'upload_date': '20140801',
+ 'duration': 258.051,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ 'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
+ },
+ }, {
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'md5': 'db7aba89d4603dadd627e9d1973946fe',
'info_dict': {
'like_count': int,
'dislike_count': int,
'comment_count': int,
- }
+ 'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
+ },
+ 'skip': 'Removed due to DMCA',
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
'like_count': int,
'dislike_count': int,
'comment_count': int,
- }
+ 'categories': ['seraina', 'jessica', 'krewella', 'alive'],
+ },
+ 'skip': 'Removed due to DMCA',
}, {
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
'md5': '0584fc13b50f887127d9d1007589d27f',
'ext': 'mp4',
'title': 'Frozen - Let It Go Cover by Siren Gene',
'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
- 'uploader': 'Siren Gene',
- 'uploader_id': 'Siren',
'thumbnail': 're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
+ 'uploader': 'Siren',
+ 'timestamp': 1395448018,
+ 'upload_date': '20140322',
'duration': 221.788,
'like_count': int,
'dislike_count': int,
'comment_count': int,
- }
+ 'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
+ },
+ 'skip': 'Removed due to DMCA',
}
]
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
- data_json = self._search_regex(
- r'(?s)window\["(?:tapiVideoData|vubeOriginalVideoData)"\]\s*=\s*(\{.*?\n});\n',
- webpage, 'video data'
- )
- data = json.loads(data_json)
- video = (
- data.get('video') or
- data)
- assert isinstance(video, dict)
+ video = self._download_json(
+ 'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')
public_id = video['public_id']
- formats = [
- {
- 'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (fmt['media_resolution_id'], public_id),
- 'height': int(fmt['height']),
- 'abr': int(fmt['audio_bitrate']),
- 'vbr': int(fmt['video_bitrate']),
- 'format_id': fmt['media_resolution_id']
- } for fmt in video['mtm'] if fmt['transcoding_status'] == 'processed'
- ]
+ formats = []
+
+ for media in video['media'].get('video', []) + video['media'].get('audio', []):
+ if media['transcoding_status'] != 'processed':
+ continue
+ fmt = {
+ 'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
+ 'abr': int(media['audio_bitrate']),
+ 'format_id': compat_str(media['media_resolution_id']),
+ }
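+ # Audio-only media report a zero video bitrate; set vbr/height only for real video.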
+ vbr = int(media['video_bitrate'])
+ if vbr:
+ fmt.update({
+ 'vbr': vbr,
+ 'height': int(media['height']),
+ })
+ formats.append(fmt)
self._sort_formats(formats)
+ if not formats and video.get('vst') == 'dmca':
+ raise ExtractorError(
+ 'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
+ expected=True)
+
title = video['title']
description = video.get('description')
- thumbnail = self._proto_relative_url(
- video.get('thumbnail') or video.get('thumbnail_src'),
- scheme='http:')
- uploader = data.get('user', {}).get('channel', {}).get('name') or video.get('user_alias')
- uploader_id = data.get('user', {}).get('name')
+ thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
+ uploader = video.get('user_alias') or video.get('channel')
timestamp = int_or_none(video.get('upload_time'))
duration = video['duration']
view_count = video.get('raw_view_count')
- like_count = video.get('rlikes')
- if like_count is None:
- like_count = video.get('total_likes')
- dislike_count = video.get('rhates')
- if dislike_count is None:
- dislike_count = video.get('total_hates')
+ like_count = video.get('total_likes')
+ dislike_count = video.get('total_hates')
comments = video.get('comments')
comment_count = None
else:
comment_count = len(comments)
+ categories = [tag['text'] for tag in video['tags']]
+
return {
'id': video_id,
'formats': formats,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
- 'uploader_id': uploader_id,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
+ 'categories': categories,
}
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse_urlparse,
+ ExtractorError,
parse_duration,
qualities,
)
_VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
_TEST = {
- 'url': 'http://m.vuclip.com/w?cid=843902317&fid=63532&z=1007&nvar&frm=index.html&bu=4757321434',
- 'md5': '92ac9d1ccefec4f0bb474661ab144fcf',
+ 'url': 'http://m.vuclip.com/w?cid=922692425&fid=70295&z=1010&nvar&frm=index.html',
'info_dict': {
- 'id': '843902317',
+ 'id': '922692425',
'ext': '3gp',
- 'title': 'Movie Trailer: Noah',
- 'duration': 139,
+ 'title': 'The Toy Soldiers - Hollywood Movie Trailer',
+ 'duration': 180,
}
}
webpage = self._download_webpage(
adfree_url, video_id, note='Download post-ad page')
+ error_msg = self._html_search_regex(
+ r'<p class="message">(.*?)</p>', webpage, 'error message',
+ default=None)
+ if error_msg:
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, error_msg), expected=True)
+
+ # These clowns alternate between two page types
links_code = self._search_regex(
- r'(?s)<div class="social align_c".*?>(.*?)<hr\s*/?>', webpage,
- 'links')
+ r'''(?xs)
+ (?:
+ <img\s+src="/im/play.gif".*?>|
+ <!--\ player\ end\ -->\s*</div><!--\ thumb\ end-->
+ )
+ (.*?)
+ (?:
+ <a\s+href="fblike|<div\s+class="social">
+ )
+ ''', webpage, 'links')
title = self._html_search_regex(
r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
quality_order = qualities(['Reg', 'Hi'])
formats = []
for url, q in re.findall(
- r'<a href="(?P<url>[^"]+)".*?>(?P<q>[^<]+)</a>', links_code):
+ r'<a\s+href="(?P<url>[^"]+)".*?>(?:<button[^>]*>)?(?P<q>[^<]+)(?:</button>)?</a>', links_code):
format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
formats.append({
'format_id': format_id,
self._sort_formats(formats)
duration = parse_duration(self._search_regex(
- r'\(([0-9:]+)\)</span></h1>', webpage, 'duration', fatal=False))
+ r'\(([0-9:]+)\)</span>', webpage, 'duration', fatal=False))
return {
'id': video_id,
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .subtitles import SubtitlesInfoExtractor
+from ..utils import (
+ xpath_text,
+ int_or_none,
+)
+
+
+class WallaIE(SubtitlesInfoExtractor):
+ _VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
+ _TEST = {
+ 'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
+ 'info_dict': {
+ 'id': '2642630',
+ 'display_id': 'one-direction-all-for-one',
+ 'ext': 'flv',
+ 'title': 'וואן דיירקשן: ההיסטריה',
+ 'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 3600,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }
+
+ _SUBTITLE_LANGS = {
+ 'עברית': 'heb',
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ video = self._download_xml(
+ 'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
+ display_id)
+
+ item = video.find('./items/item')
+
+ title = xpath_text(item, './title', 'title')
+ description = xpath_text(item, './synopsis', 'description')
+ thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
+ duration = int_or_none(xpath_text(item, './duration', 'duration'))
+
+ subtitles = {}
+ for subtitle in item.findall('./subtitles/subtitle'):
+ lang = xpath_text(subtitle, './title')
+ subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src')
+
+ if self._downloader.params.get('listsubtitles', False):
+ self._list_available_subtitles(video_id, subtitles)
+ return
+
+ subtitles = self.extract_subtitles(video_id, subtitles)
+
+ formats = []
+ for quality in item.findall('./qualities/quality'):
+ format_id = xpath_text(quality, './title')
+ fmt = {
+ 'url': 'rtmp://wafla.walla.co.il/vod',
+ 'play_path': xpath_text(quality, './src'),
+ 'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
+ 'page_url': url,
+ 'ext': 'flv',
+ 'format_id': format_id,
+ }
+ m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
+ if m:
+ fmt['height'] = int(m.group('height'))
+ formats.append(fmt)
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
_VALID_URL = r'^https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'
_TEST = {
'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
+ 'info_dict': {
+ 'title': 'Sinkhole of bureaucracy',
+ },
'playlist': [{
'md5': 'c3f4b4922ffa259243f68e928db2db8c',
'info_dict': {
from __future__ import unicode_literals
import re
+import hashlib
from .common import InfoExtractor
from ..utils import (
+ ExtractorError,
unified_strdate,
)
class WatIE(InfoExtractor):
- _VALID_URL = r'http://www\.wat\.tv/.*-(?P<shortID>.*?)_.*?\.html'
+ _VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
IE_NAME = 'wat.tv'
- _TEST = {
- 'url': 'http://www.wat.tv/video/world-war-philadelphia-vost-6bv55_2fjr7_.html',
- 'info_dict': {
- 'id': '10631273',
- 'ext': 'mp4',
- 'title': 'World War Z - Philadelphia VOST',
- 'description': 'La menace est partout. Que se passe-t-il à Philadelphia ?\r\nWORLD WAR Z, avec Brad Pitt, au cinéma le 3 juillet.\r\nhttp://www.worldwarz.fr',
+ _TESTS = [
+ {
+ 'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
+ 'md5': 'ce70e9223945ed26a8056d413ca55dc9',
+ 'info_dict': {
+ 'id': '11713067',
+ 'display_id': 'soupe-figues-l-orange-aux-epices',
+ 'ext': 'mp4',
+ 'title': 'Soupe de figues à l\'orange et aux épices',
+ 'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
+ 'upload_date': '20140819',
+ 'duration': 120,
+ },
},
- 'params': {
- # Sometimes wat serves the whole file with the --test option
- 'skip_download': True,
+ {
+ 'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
+ 'md5': 'fbc84e4378165278e743956d9c1bf16b',
+ 'info_dict': {
+ 'id': '11713075',
+ 'display_id': 'gregory-lemarchal-voix-ange',
+ 'ext': 'mp4',
+ 'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
+ 'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3',
+ 'upload_date': '20140816',
+ 'duration': 2910,
+ },
+ 'skip': "Ce contenu n'est pas disponible pour l'instant.",
},
- }
+ ]
def download_video_info(self, real_id):
# 'contentv4' is used in the website, but it also returns the related
def real_id_for_chapter(chapter):
return chapter['tc_start'].split('-')[0]
mobj = re.match(self._VALID_URL, url)
- short_id = mobj.group('shortID')
- webpage = self._download_webpage(url, short_id)
+ short_id = mobj.group('short_id')
+ display_id = mobj.group('display_id')
+ webpage = self._download_webpage(url, display_id or short_id)
real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
video_info = self.download_video_info(real_id)
+
+ error_desc = video_info.get('error_desc')
+ if error_desc:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error_desc), expected=True)
+
+ geo_list = video_info.get('geoList')
+ country = geo_list[0] if geo_list else ''
+
chapters = video_info['chapters']
first_chapter = chapters[0]
+ files = video_info['files']
+ first_file = files[0]
if real_id_for_chapter(first_chapter) != real_id:
self.to_screen('Multipart video detected')
upload_date = unified_strdate(first_chapter['date_diffusion'])
# Otherwise we can continue and extract just one part, we have to use
# the short id for getting the video url
+
+ formats = [{
+ 'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
+ 'format_id': 'Mobile',
+ }]
+
+ fmts = [('SD', 'web')]
+ if first_file.get('hasHD'):
+ fmts.append(('HD', 'webhd'))
+
+ def compute_token(param):
+ timestamp = '%08x' % int(self._download_webpage(
+ 'http://www.wat.tv/servertime', real_id,
+ 'Downloading server time').split('|')[0])
+ magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
+ return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)
+
+ for fmt in fmts:
+ webid = '/%s/%s' % (fmt[1], real_id)
+ video_url = self._download_webpage(
+ 'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country),
+ real_id,
+ 'Downloading %s video URL' % fmt[0],
+ 'Failed to download %s video URL' % fmt[0],
+ False)
+ if not video_url:
+ continue
+ formats.append({
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'format_id': fmt[0],
+ })
+
return {
'id': real_id,
- 'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
+ 'display_id': display_id,
'title': first_chapter['title'],
'thumbnail': first_chapter['preview'],
'description': first_chapter['description'],
'view_count': video_info['views'],
'upload_date': upload_date,
+ 'duration': first_file['duration'],
+ 'formats': formats,
}
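
The compute_token helper above derives a URL token from a fixed magic constant, the request path, and the server clock. A standalone sketch of the same scheme; only the magic constant is taken from the patch, while the path and timestamp below are made-up illustration values:

    import hashlib

    MAGIC = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'

    def compute_token(param, server_time):
        # server_time would normally come from http://www.wat.tv/servertime
        timestamp = '%08x' % server_time
        digest = hashlib.md5((MAGIC + param + timestamp).encode('ascii')).hexdigest()
        return '%s/%s' % (digest, timestamp)

    # Hypothetical call: token for the SD variant of a video
    print(compute_token('/web/11713067', 1408435200))
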
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class WayOfTheMasterIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.wayofthemaster\.com/([^/?#]*/)*(?P<id>[^/?#]+)\.s?html(?:$|[?#])'
+
+ _TEST = {
+ 'url': 'http://www.wayofthemaster.com/hbks.shtml',
+ 'md5': '5316b57487ada8480606a93cb3d18d24',
+ 'info_dict': {
+ 'id': 'hbks',
+ 'ext': 'mp4',
+ 'title': 'Intelligent Design vs. Evolution',
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._search_regex(
+ r'<img src="images/title_[^"]+".*?alt="([^"]+)"',
+ webpage, 'title', default=None)
+ if title is None:
+ title = self._html_search_regex(
+ r'<title>(.*?)</title>', webpage, 'page title')
+
+ url_base = self._search_regex(
+ r'<param\s+name="?movie"?\s+value=".*?/wotm_videoplayer_highlow[0-9]*\.swf\?vid=([^"]+)"',
+ webpage, 'URL base')
+ formats = [{
+ 'format_id': 'low',
+ 'quality': 1,
+ 'url': url_base + '_low.mp4',
+ }, {
+ 'format_id': 'high',
+ 'quality': 2,
+ 'url': url_base + '_high.mp4',
+ }]
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ }
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
- r's1\.addVariable\("file",\s*"([^"]+)"\);', webpage, 'video URL')
+ r"'file'\s*:\s*'([^']+)'", webpage, 'video URL')
if YoutubeIE.suitable(video_url):
self.to_screen('Found YouTube video')
return {
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
+from ..utils import ExtractorError, compat_urllib_request
class WistiaIE(InfoExtractor):
_VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
+ _API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json'
_TEST = {
'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
- data_json = self._html_search_regex(
- r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
-
- data = json.loads(data_json)
+ request = compat_urllib_request.Request(self._API_URL.format(video_id))
+ request.add_header('Referer', url) # Some videos require this.
+ data_json = self._download_json(request, video_id)
+ if data_json.get('error'):
+ raise ExtractorError('Error while getting the playlist',
+ expected=True)
+ data = data_json['media']
formats = []
thumbnails = []
"info_dict": {
"id": "wshh6a7q1ny0G34ZwuIO",
"ext": "mp4",
- "title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+ "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
}
}
def _real_extract(self, url):
- m = re.match(self._VALID_URL, url)
- video_id = m.group('id')
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
- webpage_src = self._download_webpage(url, video_id)
-
- m_vevo_id = re.search(r'videoId=(.*?)&?',
- webpage_src)
+ m_vevo_id = re.search(r'videoId=(.*?)&?', webpage)
if m_vevo_id is not None:
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
video_url = self._search_regex(
- r'so\.addVariable\("file","(.*?)"\)', webpage_src, 'video URL')
+ r'so\.addVariable\("file","(.*?)"\)', webpage, 'video URL')
if 'youtube' in video_url:
return self.url_result(video_url, ie='Youtube')
video_title = self._html_search_regex(
- r"<title>(.*)</title>", webpage_src, 'title')
+ r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
+ webpage, 'title')
# Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
thumbnail = self._html_search_regex(
- r'rel="image_src" href="(.*)" />', webpage_src, 'thumbnail',
+ r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
fatal=False)
if not thumbnail:
- _title = r"""candytitles.*>(.*)</span>"""
- mobj = re.search(_title, webpage_src)
+ _title = r'candytitles.*>(.*)</span>'
+ mobj = re.search(_title, webpage)
if mobj is not None:
video_title = mobj.group(1)
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ parse_iso8601,
+ float_or_none,
+ int_or_none,
+)
+
+
+class XboxClipsIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?xboxclips\.com/video\.php\?.*vid=(?P<id>[\w-]{36})'
+ _TEST = {
+ 'url': 'https://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325',
+ 'md5': 'fbe1ec805e920aeb8eced3c3e657df5d',
+ 'info_dict': {
+ 'id': '074a69a9-5faf-46aa-b93b-9909c1720325',
+ 'ext': 'mp4',
+ 'title': 'Iabdulelah playing Upload Studio',
+ 'filesize_approx': 28101836.8,
+ 'timestamp': 1407388500,
+ 'upload_date': '20140807',
+ 'duration': 56,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_url = self._html_search_regex(
+ r'>Link: <a href="([^"]+)">', webpage, 'video URL')
+ title = self._html_search_regex(
+ r'<title>XboxClips \| ([^<]+)</title>', webpage, 'title')
+ timestamp = parse_iso8601(self._html_search_regex(
+ r'>Recorded: ([^<]+)<', webpage, 'upload date', fatal=False))
+ filesize = float_or_none(self._html_search_regex(
+ r'>Size: ([\d\.]+)MB<', webpage, 'file size', fatal=False), invscale=1024 * 1024)
+ duration = int_or_none(self._html_search_regex(
+ r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False))
+ view_count = int_or_none(self._html_search_regex(
+ r'>Views: (\d+)<', webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'timestamp': timestamp,
+ 'filesize_approx': filesize,
+ 'duration': duration,
+ 'view_count': view_count,
+ }
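
The XboxClips page reports the clip size in megabytes, so the extractor scales it with invscale=1024 * 1024; that is exactly how the test's filesize_approx of 28101836.8 follows from a reported '26.8' MB. A simplified stand-in for the utils helper (the real float_or_none also takes a default):

    def float_or_none(v, scale=1, invscale=1):
        return float(v) * invscale / scale if v is not None else None

    assert float_or_none('26.8', invscale=1024 * 1024) == 28101836.8
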
class XHamsterIE(InfoExtractor):
"""Information Extractor for xHamster"""
- _VALID_URL = r'http://(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
+ _VALID_URL = r'http://(?:.+?\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
_TESTS = [
{
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
- 'md5': '8281348b8d3c53d39fffb377d24eac4e',
'info_dict': {
'id': '1509445',
'ext': 'mp4',
},
{
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
- 'md5': '4cbd8d56708ecb4fb4124c23e4acb81a',
'info_dict': {
'id': '2221348',
'ext': 'mp4',
'id': 'kVTUy_G222_',
'ext': 'mp4',
'title': 'strange erotica',
- 'description': 'surreal gay themed erotica...almost an ET kind of thing',
+ 'description': 'http://www.xtube.com an ET kind of thing',
'uploader': 'greenshowers',
'duration': 450,
'age_limit': 18,
'age_limit': 18,
}
+
class XTubeUserIE(InfoExtractor):
IE_DESC = 'XTube user profile'
_VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])'
+ _TEST = {
+ 'url': 'http://www.xtube.com/community/profile.php?user=greenshowers',
+ 'info_dict': {
+ 'id': 'greenshowers',
+ },
+ 'playlist_mincount': 155,
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
+# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
+ ExtractorError,
compat_urllib_parse,
compat_urlparse,
clean_html,
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
- _VALID_URL = r'https?://(?:screen|movies)\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html'
+ _VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+?)-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
+ 'duration': 6863,
},
},
{
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
- 'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
+ 'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
+ 'duration': 151,
},
},
{
- 'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
- 'md5': '410b7104aa9893b765bc22787a22f3d9',
+ 'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
+ 'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
- 'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
+ 'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
- 'title': 'The World Loves Spider-Man',
- 'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
+ 'title': "Yahoo Saves 'Community'",
+ 'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
+ 'duration': 170,
}
+ },
+ {
+ 'url': 'https://tw.screen.yahoo.com/taipei-opinion-poll/選情站報-街頭民調-台北市篇-102823042.html',
+ 'md5': '92a7fdd8a08783c68a174d7aa067dde8',
+ 'info_dict': {
+ 'id': '7a23b569-7bea-36cb-85b9-bd5301a0a1fb',
+ 'ext': 'mp4',
+ 'title': '選情站報 街頭民調 台北市篇',
+ 'description': '選情站報 街頭民調 台北市篇',
+ 'duration': 429,
+ }
+ },
+ {
+ 'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
+ 'md5': '0b51660361f0e27c9789e7037ef76f4b',
+ 'info_dict': {
+ 'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
+ 'ext': 'mp4',
+ 'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
+ 'description': 'md5:f66c890e1490f4910a9953c941dee944',
+ 'duration': 97,
+ }
+ },
+ {
+ 'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
+ 'md5': '57e06440778b1828a6079d2f744212c4',
+ 'info_dict': {
+ 'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
+ 'ext': 'mp4',
+ 'title': 'Program that makes hockey more affordable not offered in Manitoba',
+ 'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
+ 'duration': 121,
+ }
+ }, {
+ 'url': 'https://ca.finance.yahoo.com/news/20-most-valuable-brands-world-112600775.html',
+ 'md5': '3e401e4eed6325aa29d9b96125fd5b4f',
+ 'info_dict': {
+ 'id': 'c1b4c09c-8ed8-3b65-8b05-169c55358a83',
+ 'ext': 'mp4',
+ 'title': "Apple Is The World's Most Valuable Brand",
+ 'description': 'md5:73eabc1a11c6f59752593b2ceefa1262',
+ 'duration': 21,
+ }
+ }, {
+ 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
+ 'md5': '67010fdf3a08d290e060a4dd96baa07b',
+ 'info_dict': {
+ 'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
+ 'ext': 'mp4',
+ 'title': 'China Moses Is Crazy About the Blues',
+ 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
+ 'duration': 128,
+ }
+ }, {
+ 'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
+ 'md5': 'd9a083ccf1379127bf25699d67e4791b',
+ 'info_dict': {
+ 'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
+ 'ext': 'mp4',
+ 'title': 'Connect the Dots: Dark Side of Virgo',
+ 'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
+ 'duration': 201,
+ }
+ }, {
+ 'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
+ 'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
+ display_id = mobj.group('display_id')
+ url = mobj.group('url')
+ host = mobj.group('host')
+ webpage = self._download_webpage(url, display_id)
+
+ # Look for iframed media first
+ iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
+ if iframe_m:
+ iframepage = self._download_webpage(
+ host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
+ items_json = self._search_regex(
+ r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
+ if items_json:
+ items = json.loads(items_json)
+ video_id = items[0]['id']
+ return self._get_info(video_id, display_id, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
if items_json is None:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
- r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"'
+ r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
+ r'"first_videoid"\s*:\s*"([^"]+)"',
]
- long_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
- video_id = long_id
+ video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
# The 'meta' field is not always in the video webpage, we request it
# from another page
- long_id = info['id']
- return self._get_info(long_id, video_id, webpage)
+ video_id = info['id']
+ return self._get_info(video_id, display_id, webpage)
- def _get_info(self, long_id, video_id, webpage):
+ def _get_info(self, video_id, display_id, webpage):
+ region = self._search_regex(
+ r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+ webpage, 'region', fatal=False, default='US')
query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
- ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
- ' AND protocol="http"' % long_id)
+ ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="%s"'
+ ' AND protocol="http"' % (video_id, region))
data = compat_urllib_parse.urlencode({
'q': query,
'env': 'prod',
})
query_result = self._download_json(
'http://video.query.yahoo.com/v1/public/yql?' + data,
- video_id, 'Downloading video info')
+ display_id, 'Downloading video info')
+
info = query_result['query']['results']['mediaObj'][0]
- meta = info['meta']
+ meta = info.get('meta')
+
+ if not meta:
+ msg = info['status'].get('msg')
+ if msg:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, msg), expected=True)
+ raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
return {
'id': video_id,
+ 'display_id': display_id,
'title': meta['title'],
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
+ 'duration': int_or_none(meta.get('duration')),
}
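
_get_info above fetches the stream list through Yahoo's public YQL endpoint rather than scraping it from the page. A sketch of the request it builds, reusing an id from the test cases above and an assumed region of 'US':

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    video_id = 'c1b4c09c-8ed8-3b65-8b05-169c55358a83'  # from a test case above
    region = 'US'
    query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
             ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="%s"'
             ' AND protocol="http"' % (video_id, region))
    url = ('http://video.query.yahoo.com/v1/public/yql?'
           + urlencode({'q': query, 'env': 'prod'}))
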
-class YahooNewsIE(YahooIE):
- IE_NAME = 'yahoo:news'
- _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
-
- _TESTS = [{
- 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
- 'md5': '67010fdf3a08d290e060a4dd96baa07b',
- 'info_dict': {
- 'id': '104538833',
- 'ext': 'mp4',
- 'title': 'China Moses Is Crazy About the Blues',
- 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
- },
- }]
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
- long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
- return self._get_info(long_id, video_id, webpage)
-
-
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import compat_urllib_parse
+
+
+class YnetIE(InfoExtractor):
+ _VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
+ _TESTS = [
+ {
+ 'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
+ 'info_dict': {
+ 'id': 'L-11659-99244',
+ 'ext': 'flv',
+ 'title': 'איש לא יודע מאיפה באנו',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ }
+ }, {
+ 'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
+ 'info_dict': {
+ 'id': 'L-8859-84418',
+ 'ext': 'flv',
+ 'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
+ 'thumbnail': 're:^https?://.*\.jpg',
+ }
+ }
+ ]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage))
+ config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
+ f4m_url = config['clip']['url']
+ title = self._og_search_title(webpage)
+ m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
+ if m:
+ title = m.group('title')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': self._extract_f4m_formats(f4m_url, video_id),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ }
\ No newline at end of file
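
The Ynet extractor above pulls a percent-encoded "config=" JSON blob out of the og:video URL, and its clip.url field is the f4m manifest. A minimal sketch of that decoding chain; the URL below is a made-up example, not a real Ynet link:

    import json
    import re

    try:
        from urllib.parse import unquote_plus  # Python 3
    except ImportError:
        from urllib import unquote_plus        # Python 2

    og_video = ('http://example.com/player.swf?config=%7B%22clip%22%3A'
                '%7B%22url%22%3A%22http%3A%2F%2Fexample.com%2Fvideo.f4m%22%7D%7D')
    content = unquote_plus(og_video)
    config = json.loads(re.search(r'config=({.+?})$', content).group(1))
    f4m_url = config['clip']['url']  # 'http://example.com/video.f4m'
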
class YouJizzIE(InfoExtractor):
- _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
+ _VALID_URL = r'^https?://(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'file': '2189178.flv',
# coding: utf-8
-import json
+from __future__ import unicode_literals
+
import math
import random
import re
class YoukuIE(InfoExtractor):
- _VALID_URL = r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)'
- _TEST = {
- u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
- u"file": u"XNDgyMDQ2NTQw_part00.flv",
- u"md5": u"ffe3f2e435663dc2d1eea34faeff5b5b",
- u"params": {u"test": False},
- u"info_dict": {
- u"title": u"youtube-dl test video \"'/\\ä↭𝕐"
+ _VALID_URL = r'''(?x)
+ (?:
+ http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
+ youku:)
+ (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
+ '''
+ _TEST = {
+ 'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
+ 'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
+ 'params': {
+ 'test': False
+ },
+ 'info_dict': {
+ 'id': 'XNDgyMDQ2NTQw_part00',
+ 'ext': 'flv',
+ 'title': 'youtube-dl test video "\'/\\ä↭𝕐'
}
}
-
def _gen_sid(self):
nowTime = int(time.time() * 1000)
random1 = random.randint(1000,1998)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- video_id = mobj.group('ID')
+ video_id = mobj.group('id')
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
- jsondata = self._download_webpage(info_url, video_id)
-
- self.report_extraction(video_id)
- try:
- config = json.loads(jsondata)
- error_code = config['data'][0].get('error_code')
- if error_code:
- # -8 means blocked outside China.
- error = config['data'][0].get('error') # Chinese and English, separated by newline.
- raise ExtractorError(error or u'Server reported error %i' % error_code,
- expected=True)
-
- video_title = config['data'][0]['title']
- seed = config['data'][0]['seed']
-
- format = self._downloader.params.get('format', None)
- supported_format = list(config['data'][0]['streamfileids'].keys())
-
- if format is None or format == 'best':
- if 'hd2' in supported_format:
- format = 'hd2'
- else:
- format = 'flv'
- ext = u'flv'
- elif format == 'worst':
- format = 'mp4'
- ext = u'mp4'
- else:
- format = 'flv'
- ext = u'flv'
+ config = self._download_json(info_url, video_id)
+
+ error_code = config['data'][0].get('error_code')
+ if error_code:
+ # -8 means blocked outside China.
+ error = config['data'][0].get('error') # Chinese and English, separated by newline.
+ raise ExtractorError(error or 'Server reported error %i' % error_code,
+ expected=True)
+ video_title = config['data'][0]['title']
+ seed = config['data'][0]['seed']
- fileid = config['data'][0]['streamfileids'][format]
- keys = [s['k'] for s in config['data'][0]['segs'][format]]
- # segs is usually a dictionary, but an empty *list* if an error occured.
- except (UnicodeDecodeError, ValueError, KeyError):
- raise ExtractorError(u'Unable to extract info section')
+ format = self._downloader.params.get('format', None)
+ supported_format = list(config['data'][0]['streamfileids'].keys())
+
+ # TODO proper format selection
+ if format is None or format == 'best':
+ if 'hd2' in supported_format:
+ format = 'hd2'
+ else:
+ format = 'flv'
+ ext = 'flv'
+ elif format == 'worst':
+ format = 'mp4'
+ ext = 'mp4'
+ else:
+ format = 'flv'
+ ext = 'flv'
+
+ fileid = config['data'][0]['streamfileids'][format]
+ keys = [s['k'] for s in config['data'][0]['segs'][format]]
+ # segs is usually a dictionary, but an empty *list* if an error occurred.
files_info=[]
sid = self._gen_sid()
#column 8,9 of fileid represent the segment number
#fileid[7:9] should be changed
for index, key in enumerate(keys):
-
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
- download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+ download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
info = {
'id': '%s_part%02d' % (video_id, index),
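
The loop above rewrites characters 8-9 of the fileid (the segment columns noted in the comments) with the zero-padded hex segment index. Worked on a fabricated fileid:

    fileid = 'AABBCCDDXXFFGGHH'  # made-up; positions 8-9 ('XX') hold the segment
    for index in range(3):
        temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
        # -> 'AABBCCDD00FFGGHH', 'AABBCCDD01FFGGHH', 'AABBCCDD02FFGGHH'
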
_VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
_TEST = {
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
- 'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
'info_dict': {
'id': '505835',
'ext': 'mp4',
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class YourUploadIE(InfoExtractor):
+ _VALID_URL = r'''(?x)https?://(?:www\.)?
+ (?:yourupload\.com/watch|
+ embed\.yourupload\.com|
+ embed\.yucache\.net
+ )/(?P<id>[A-Za-z0-9]+)
+ '''
+ _TESTS = [
+ {
+ 'url': 'http://yourupload.com/watch/14i14h',
+ 'md5': 'bf5c2f95c4c917536e80936af7bc51e1',
+ 'info_dict': {
+ 'id': '14i14h',
+ 'ext': 'mp4',
+ 'title': 'BigBuckBunny_320x180.mp4',
+ 'thumbnail': 're:^https?://.*\.jpe?g',
+ }
+ },
+ {
+ 'url': 'http://embed.yourupload.com/14i14h',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://embed.yucache.net/14i14h?client_file_id=803349',
+ 'only_matching': True,
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ url = 'http://embed.yucache.net/{0:}'.format(video_id)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._og_search_title(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+ url = self._og_search_video_url(webpage)
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ }
# coding: utf-8
-import errno
-import io
+from __future__ import unicode_literals
+
+
import itertools
import json
import os.path
compat_str,
clean_html,
- get_cachedir,
get_element_by_id,
get_element_by_attribute,
ExtractorError,
int_or_none,
- PagedList,
+ OnDemandPagedList,
unescapeHTML,
unified_strdate,
orderedSet,
- write_json_file,
uppercase_escape,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
+ _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
_LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NETRC_MACHINE = 'youtube'
def _set_language(self):
return bool(self._download_webpage(
self._LANG_URL, None,
- note=u'Setting language', errnote='unable to set language',
+ note='Setting language', errnote='unable to set language',
fatal=False))
def _login(self):
+ """
+ Attempt to log in to YouTube.
+ True is returned if successful or skipped.
+ False is returned if login failed.
+
+ If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
+ """
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
- raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
- return False
+ raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
+ return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
- note=u'Downloading login page',
- errnote=u'unable to fetch login page', fatal=False)
+ note='Downloading login page',
+ errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
- login_page, u'Login GALX parameter')
+ login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
- u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- u'Email': username,
- u'GALX': galx,
- u'Passwd': password,
- u'PersistentCookie': u'yes',
- u'_utf8': u'霱',
- u'bgresponse': u'js_disabled',
- u'checkConnection': u'',
- u'checkedDomains': u'youtube',
- u'dnConn': u'',
- u'pstMsg': u'0',
- u'rmShown': u'1',
- u'secTok': u'',
- u'signIn': u'Sign in',
- u'timeStmp': u'',
- u'service': u'youtube',
- u'uilel': u'3',
- u'hl': u'en_US',
+ 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ 'Email': username,
+ 'GALX': galx,
+ 'Passwd': password,
+
+ 'PersistentCookie': 'yes',
+ '_utf8': '霱',
+ 'bgresponse': 'js_disabled',
+ 'checkConnection': '',
+ 'checkedDomains': 'youtube',
+ 'dnConn': '',
+ 'pstMsg': '0',
+ 'rmShown': '1',
+ 'secTok': '',
+ 'signIn': 'Sign in',
+ 'timeStmp': '',
+ 'service': 'youtube',
+ 'uilel': '3',
+ 'hl': 'en_US',
}
+
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
# chokes on unicode
login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
- note=u'Logging in', errnote=u'unable to log in', fatal=False)
+ note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
+
+ if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
+ raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
+
+ # Two-Factor
+ # TODO add SMS and phone call support - these require making a request and then prompting the user
+
+ if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
+ tfa_code = self._get_tfa_info()
+
+ if tfa_code is None:
+ self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
+ self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
+ return False
+
+ # Unlike the first login form, secTok and timeStmp are both required for the TFA form
+
+ match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
+ if match is None:
+ self._downloader.report_warning('Failed to get secTok - did the page structure change?')
+ secTok = match.group(1)
+ match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
+ if match is None:
+ self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
+ timeStmp = match.group(1)
+
+ tfa_form_strs = {
+ 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ 'smsToken': '',
+ 'smsUserPin': tfa_code,
+ 'smsVerifyPin': 'Verify',
+
+ 'PersistentCookie': 'yes',
+ 'checkConnection': '',
+ 'checkedDomains': 'youtube',
+ 'pstMsg': '1',
+ 'secTok': secTok,
+ 'timeStmp': timeStmp,
+ 'service': 'youtube',
+ 'hl': 'en_US',
+ }
+ tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
+ tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
+
+ tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
+ tfa_results = self._download_webpage(
+ tfa_req, None,
+ note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
+
+ if tfa_results is False:
+ return False
+
+ if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
+ self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
+ return False
+ if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
+ self._downloader.report_warning('unable to log in - did the page structure change?')
+ return False
+ if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
+ self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
+ return False
+
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
- self._downloader.report_warning(u'unable to log in: bad username or password')
+ self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
self._download_webpage(
req, None,
- note=u'Confirming age', errnote=u'Unable to confirm age')
- return True
+ note='Confirming age', errnote='Unable to confirm age',
+ fatal=False)
def _real_initialize(self):
if self._downloader is None:
return
- if not self._set_language():
- return
+ if self._get_login_info()[0] is not None:
+ if not self._set_language():
+ return
if not self._login():
return
self._confirm_age()
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
- IE_DESC = u'YouTube.com'
+ IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
- (?:https?://|//)? # http(s):// or protocol-independent URL (optional)
+ (?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
- (?:(?:v|embed|e)/) # v/ or embed/ or e/
+ (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
)
))
|youtu\.be/ # just youtu.be/xxxx
- |https?://(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
+ |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
+ (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
+ '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+ '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+ '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
# Dash webm audio
- '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
+ '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
+ # Dash mp4
+ '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+ '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+ '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
+
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
- IE_NAME = u'youtube'
+ IE_NAME = 'youtube'
_TESTS = [
{
- u"url": u"http://www.youtube.com/watch?v=BaW_jenozKc",
- u"file": u"BaW_jenozKc.mp4",
- u"info_dict": {
- u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
- u"uploader": u"Philipp Hagemeister",
- u"uploader_id": u"phihag",
- u"upload_date": u"20121002",
- u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
- u"categories": [u'Science & Technology'],
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
+ 'info_dict': {
+ 'id': 'BaW_jenozKc',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
+ 'uploader': 'Philipp Hagemeister',
+ 'uploader_id': 'phihag',
+ 'upload_date': '20121002',
+ 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
+ 'categories': ['Science & Technology'],
+ 'like_count': int,
+ 'dislike_count': int,
}
},
{
- u"url": u"http://www.youtube.com/watch?v=UxxajLWwzqY",
- u"file": u"UxxajLWwzqY.mp4",
- u"note": u"Test generic use_cipher_signature video (#897)",
- u"info_dict": {
- u"upload_date": u"20120506",
- u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
- u"description": u"md5:fea86fda2d5a5784273df5c7cc994d9f",
- u"uploader": u"Icona Pop",
- u"uploader_id": u"IconaPop"
+ 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
+ 'note': 'Test generic use_cipher_signature video (#897)',
+ 'info_dict': {
+ 'id': 'UxxajLWwzqY',
+ 'ext': 'mp4',
+ 'upload_date': '20120506',
+ 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
+ 'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
+ 'uploader': 'Icona Pop',
+ 'uploader_id': 'IconaPop',
}
},
{
- u"url": u"https://www.youtube.com/watch?v=07FYdnEawAQ",
- u"file": u"07FYdnEawAQ.mp4",
- u"note": u"Test VEVO video with age protection (#956)",
- u"info_dict": {
- u"upload_date": u"20130703",
- u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
- u"description": u"md5:64249768eec3bc4276236606ea996373",
- u"uploader": u"justintimberlakeVEVO",
- u"uploader_id": u"justintimberlakeVEVO"
+ 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
+ 'note': 'Test VEVO video with age protection (#956)',
+ 'info_dict': {
+ 'id': '07FYdnEawAQ',
+ 'ext': 'mp4',
+ 'upload_date': '20130703',
+ 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
+ 'description': 'md5:64249768eec3bc4276236606ea996373',
+ 'uploader': 'justintimberlakeVEVO',
+ 'uploader_id': 'justintimberlakeVEVO',
}
},
{
- u"url": u"//www.YouTube.com/watch?v=yZIXLfi8CZQ",
- u"file": u"yZIXLfi8CZQ.mp4",
- u"note": u"Embed-only video (#1746)",
- u"info_dict": {
- u"upload_date": u"20120608",
- u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012",
- u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7",
- u"uploader": u"SET India",
- u"uploader_id": u"setindia"
+ 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
+ 'note': 'Embed-only video (#1746)',
+ 'info_dict': {
+ 'id': 'yZIXLfi8CZQ',
+ 'ext': 'mp4',
+ 'upload_date': '20120608',
+ 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
+ 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
+ 'uploader': 'SET India',
+ 'uploader_id': 'setindia'
}
},
{
- u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
- u"file": u"a9LDPn-MO4I.m4a",
- u"note": u"256k DASH audio (format 141) via DASH manifest",
- u"info_dict": {
- u"upload_date": "20121002",
- u"uploader_id": "8KVIDEO",
- u"description": "No description available.",
- u"uploader": "8KVIDEO",
- u"title": "UHDTV TEST 8K VIDEO.mp4"
+ 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
+ 'note': '256k DASH audio (format 141) via DASH manifest',
+ 'info_dict': {
+ 'id': 'a9LDPn-MO4I',
+ 'ext': 'm4a',
+ 'upload_date': '20121002',
+ 'uploader_id': '8KVIDEO',
+ 'description': '',
+ 'uploader': '8KVIDEO',
+ 'title': 'UHDTV TEST 8K VIDEO.mp4'
},
- u"params": {
- u"youtube_include_dash_manifest": True,
- u"format": "141",
+ 'params': {
+ 'youtube_include_dash_manifest': True,
+ 'format': '141',
},
},
# DASH manifest with encrypted signature
{
- u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
- u'info_dict': {
- u'id': u'IB3lcPjvWLA',
- u'ext': u'm4a',
- u'title': u'Afrojack - The Spark ft. Spree Wilson',
- u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
- u'uploader': u'AfrojackVEVO',
- u'uploader_id': u'AfrojackVEVO',
- u'upload_date': u'20131011',
+ 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
+ 'info_dict': {
+ 'id': 'IB3lcPjvWLA',
+ 'ext': 'm4a',
+ 'title': 'Afrojack - The Spark ft. Spree Wilson',
+ 'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
+ 'uploader': 'AfrojackVEVO',
+ 'uploader_id': 'AfrojackVEVO',
+ 'upload_date': '20131011',
},
- u"params": {
- u'youtube_include_dash_manifest': True,
- u'format': '141',
+ 'params': {
+ 'youtube_include_dash_manifest': True,
+ 'format': '141',
},
},
]
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- if YoutubePlaylistIE.suitable(url): return False
- return re.match(cls._VALID_URL, url) is not None
-
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
- self.to_screen(u'%s: Downloading video info webpage' % video_id)
+ self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
- self.to_screen(u'%s: Extracting video information' % video_id)
+ self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
- self.to_screen(u'%s: Format %s not available' % (video_id, format))
+ self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
- self.to_screen(u'RTMP download detected')
+ self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
- return u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))
+ return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
- cache_dir = get_cachedir(self._downloader.params)
- cache_enabled = cache_dir is not None
- if cache_enabled:
- cache_fn = os.path.join(os.path.expanduser(cache_dir),
- u'youtube-sigfuncs',
- func_id + '.json')
- try:
- with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
- cache_spec = json.load(cachef)
- return lambda s: u''.join(s[i] for i in cache_spec)
- except IOError:
- pass # No cache available
+ cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
+ if cache_spec is not None:
+ return lambda s: ''.join(s[i] for i in cache_spec)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
- note=u'Downloading %s player %s' % (player_type, player_id),
- errnote=u'Download of %s failed' % player_url)
+ note='Downloading %s player %s' % (player_type, player_id),
+ errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
- note=u'Downloading %s player %s' % (player_type, player_id),
- errnote=u'Download of %s failed' % player_url)
+ note='Downloading %s player %s' % (player_type, player_id),
+ errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
- if cache_enabled:
- try:
- test_string = u''.join(map(compat_chr, range(len(example_sig))))
- cache_res = res(test_string)
- cache_spec = [ord(c) for c in cache_res]
- try:
- os.makedirs(os.path.dirname(cache_fn))
- except OSError as ose:
- if ose.errno != errno.EEXIST:
- raise
- write_json_file(cache_spec, cache_fn)
- except Exception:
- tb = traceback.format_exc()
- self._downloader.report_warning(
- u'Writing cache to %r failed: %s' % (cache_fn, tb))
+ if cache_spec is None:
+ test_string = ''.join(map(compat_chr, range(len(example_sig))))
+ cache_res = res(test_string)
+ cache_spec = [ord(c) for c in cache_res]
+ self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
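
The cached spec that replaces the old JSON file cache is just a list of source indices, recorded by running the deciphering function once over a probe string; replaying it is a pure permutation. A toy illustration (the spec and signature are made up, not a real YouTube pair):

    cache_spec = [6, 0, 4, 2]
    scrambled = 'abcdefg'
    descrambled = ''.join(scrambled[i] for i in cache_spec)  # 'gaec'
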
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
- starts = u'' if start == 0 else str(start)
- ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
- steps = u'' if step == 1 else (u':%d' % step)
- return u's[%s%s%s]' % (starts, ends, steps)
+ starts = '' if start == 0 else str(start)
+ ends = (':%d' % (end+step)) if end + step >= 0 else ':'
+ steps = '' if step == 1 else (':%d' % step)
+ return 's[%s%s%s]' % (starts, ends, steps)
step = None
start = '(Never used)' # Quelch pyflakes warnings - start will be
start = prev
continue
else:
- yield u's[%d]' % prev
+ yield 's[%d]' % prev
if step is None:
- yield u's[%d]' % i
+ yield 's[%d]' % i
else:
yield _genslice(start, i, step)
- test_string = u''.join(map(compat_chr, range(len(example_sig))))
+ test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
- expr_code = u' + '.join(gen_sig_code(cache_spec))
+ expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
- code = (u'if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
- u' return %s\n') % (signature_id_tuple, expr_code)
- self.to_screen(u'Extracted signature function:\n' + code)
+ code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
+ ' return %s\n') % (signature_id_tuple, expr_code)
+ self.to_screen('Extracted signature function:\n' + code)
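
For reference, two worked cases of the _genslice helper used above; per its rules, the end bound is printed as end + step when that is still non-negative, and dropped otherwise:

    def _genslice(start, end, step):
        starts = '' if start == 0 else str(start)
        ends = (':%d' % (end + step)) if end + step >= 0 else ':'
        steps = '' if step == 1 else (':%d' % step)
        return 's[%s%s%s]' % (starts, ends, steps)

    assert _genslice(2, 8, 2) == 's[2:10:2]'   # indices 2, 4, 6, 8
    assert _genslice(5, 0, -1) == 's[5::-1]'   # 5 down to 0; negative bound dropped
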
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'signature=([$a-zA-Z]+)', jscode,
- u'Initial JS player signature function name')
+ 'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
- TARGET_CLASSNAME = u'SignatureDecipher'
+ TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
- initial_function = swfi.extract_function(searched_class, u'decipher')
+ initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
- raise ExtractorError(u'Cannot decrypt signature without player_url')
+ raise ExtractorError('Cannot decrypt signature without player_url')
- if player_url.startswith(u'//'):
- player_url = u'https:' + player_url
+ if player_url.startswith('//'):
+ player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
- u'Signature extraction failed: ' + tb, cause=e)
+ 'Signature extraction failed: ' + tb, cause=e)
def _get_available_subtitles(self, video_id, webpage):
try:
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
sub_lang_list = {}
for l in lang_list:
lang = l[1]
+ if lang in sub_lang_list:
+ continue
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
'name': unescapeHTML(l[0]).encode('utf-8'),
})
- url = u'https://www.youtube.com/api/timedtext?' + params
+ url = 'https://www.youtube.com/api/timedtext?' + params
sub_lang_list[lang] = url
if not sub_lang_list:
- self._downloader.report_warning(u'video doesn\'t have subtitles')
+ self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
- self.to_screen(u'%s: Looking for automatic captions' % video_id)
+ self.to_screen('%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
- err_msg = u'Couldn\'t find automatic captions for %s' % video_id
+ err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
- self._downloader.report_warning(u'Video doesn\'t have automatic captions')
+ self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
- manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
+ manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
- return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.')
+ return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _real_extract(self, url):
proto = (
- u'http' if self._downloader.params.get('prefer_insecure', False)
- else u'https')
+ 'http' if self._downloader.params.get('prefer_insecure', False)
+ else 'https')
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+ pref_cookies = [
+ c for c in self._downloader.cookiejar
+ if c.domain == '.youtube.com' and c.name == 'PREF']
+ for pc in pref_cookies:
+ if 'hl=' in pc.value:
+ pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
+ else:
+ if pc.value:
+ pc.value += '&'
+ pc.value += 'hl=en'
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
if 'token' not in video_info:
if 'reason' in video_info:
raise ExtractorError(
- u'YouTube said: %s' % video_info['reason'][0],
+ 'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
- u'"token" parameter not in video info for unknown reason',
+ '"token" parameter not in video info for unknown reason',
video_id=video_id)
if 'view_count' in video_info:
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
- raise ExtractorError(u'"rental" videos not supported')
+ raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
- raise ExtractorError(u'Unable to extract uploader name')
+ raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
# uploader_id
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
- self._downloader.report_warning(u'unable to extract uploader nickname')
+ self._downloader.report_warning('unable to extract uploader nickname')
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
- self._downloader.report_warning(u'Unable to extract video title')
- video_title = u'_'
+ self._downloader.report_warning('Unable to extract video title')
+ video_title = '_'
# thumbnail image
# We try first to get a high quality image:
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
- self._downloader.report_warning(u'unable to extract video thumbnail')
+ self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
- m_cat_container = get_element_by_id("eow-category", video_webpage)
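+ # The category is no longer exposed via an "eow-category" element;
+ # parse the "Category" section of the watch page instead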
+ m_cat_container = self._search_regex(
+ r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
+ video_webpage, 'categories', fatal=False)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
- video_description = u''
+ video_description = ''
- def _extract_count(klass):
+ def _extract_count(count_name):
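+ # Counts now live in id="watch-like"/"watch-dislike" spans rather than
+ # in class-based markup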
count = self._search_regex(
- r'class="%s">([\d,]+)</span>' % re.escape(klass),
- video_webpage, klass, default=None)
+ r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
+ video_webpage, count_name, default=None)
if count is not None:
return int(count.replace(',', ''))
return None
- like_count = _extract_count(u'likes-count')
- dislike_count = _extract_count(u'dislikes-count')
+ like_count = _extract_count('like')
+ dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
return
if 'length_seconds' not in video_info:
- self._downloader.report_warning(u'unable to extract video duration')
+ self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
# Easy way to know if the 's' value is in url_encoded_fmt_stream_map
# these signatures are encrypted
if 'url_encoded_fmt_stream_map' not in args:
- raise ValueError(u'No stream_map present') # caught below
+ raise ValueError('No stream_map present') # caught below
re_signature = re.compile(r'[&,]s=')
m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
if m_s is not None:
- self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
+ self.to_screen('%s: Encrypted signatures detected.' % video_id)
video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
- m_s = re_signature.search(args.get('adaptive_fmts', u''))
+ m_s = re_signature.search(args.get('adaptive_fmts', ''))
if m_s is not None:
if 'adaptive_fmts' in video_info:
video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
if not age_gate:
jsplayer_url_json = self._search_regex(
r'"assets":.+?"js":\s*("[^"]+")',
- video_webpage, u'JS player URL')
+ video_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
- video_webpage, u'age gate player URL')
+ video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
- u'flash player', fatal=False)
+ 'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player)?\.js',
player_url,
'html5 player', fatal=False)
- player_desc = u'html5 player %s' % player_version
+ player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
- self.to_screen(u'{%s} signature length %s, %s' %
+ self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
- raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
+ raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
- if (self._downloader.params.get('youtube_include_dash_manifest', False)):
+ if self._downloader.params.get('youtube_include_dash_manifest', True):
try:
# The DASH manifest used needs to be the one from the original video_webpage.
# The one found in get_video_info seems to be using different signatures.
dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
dash_doc = self._download_xml(
dash_manifest_url, video_id,
- note=u'Downloading DASH manifest',
- errnote=u'Could not download DASH manifest')
- for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
+ note='Downloading DASH manifest',
+ errnote='Could not download DASH manifest')
+ for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
existing_format.update(f)
except (ExtractorError, KeyError) as e:
- self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)
+ self.report_warning('Skipping DASH manifest: %s' % e, video_id)
self._sort_formats(formats)
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
- IE_DESC = u'YouTube.com playlists'
+ IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
- (?:course|view_play_list|my_playlists|artist|playlist|watch)
+ (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_MORE_PAGES_INDICATOR = r'data-link-type="next"'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)'
- IE_NAME = u'youtube:playlist'
+ IE_NAME = 'youtube:playlist'
+ _TESTS = [{
+ 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
+ 'info_dict': {
+ 'title': 'ytdl test PL',
+ },
+ 'playlist_count': 3,
+ }, {
+ 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
+ 'info_dict': {
+ 'title': 'YDL_Empty_List',
+ },
+ 'playlist_count': 0,
+ }, {
+ 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
+ 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
+ 'info_dict': {
+ 'title': '29C3: Not my department',
+ },
+ 'playlist_count': 95,
+ }, {
+ 'note': 'issue #673',
+ 'url': 'PLBB231211A4F62143',
+ 'info_dict': {
+ 'title': '[OLD]Team Fortress 2 (Class-based LP)',
+ },
+ 'playlist_mincount': 26,
+ }, {
+ 'note': 'Large playlist',
+ 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
+ 'info_dict': {
+ 'title': 'Uploads from Cauchemar',
+ },
+ 'playlist_mincount': 799,
+ }, {
+ 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
+ 'info_dict': {
+ 'title': 'YDL_safe_search',
+ },
+ 'playlist_count': 2,
+ }, {
+ 'note': 'embedded',
+ 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
+ 'playlist_count': 4,
+ 'info_dict': {
+ 'title': 'JODA15',
+ }
+ }, {
+ 'note': 'Embedded SWF player',
+ 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
+ 'playlist_count': 4,
+ 'info_dict': {
+ 'title': 'JODA7',
+ }
+ }]
def _real_initialize(self):
self._login()
def _ids_to_results(self, ids):
- return [self.url_result(vid_id, 'Youtube', video_id=vid_id)
- for vid_id in ids]
+ return [
+ self.url_result(vid_id, 'Youtube', video_id=vid_id)
+ for vid_id in ids]
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
- webpage = self._download_webpage(url, playlist_id, u'Downloading Youtube mix')
+ webpage = self._download_webpage(
+ url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
- title_span = (search_title('playlist-title') or
- search_title('title long-title') or search_title('title'))
+ title_span = (
+ search_title('playlist-title') or
+ search_title('title long-title') or
+ search_title('title'))
title = clean_html(title_span)
- video_re = r'''(?x)data-video-username=".*?".*?
- href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id)
- ids = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
+ ids = orderedSet(re.findall(
+ r'''(?xs)data-video-username=".*?".*?
+ href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
+ webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
- self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id)
+ self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
- self.to_screen(u'Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+ self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
if playlist_id.startswith('TL'):
- raise ExtractorError(u'For downloading YouTube.com top lists, use '
- u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
+ raise ExtractorError('For downloading YouTube.com top lists, use '
+ 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# Check if the playlist exists or is private
if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
raise ExtractorError(
- u'The playlist doesn\'t exist or is private, use --username or '
+ 'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
- page, u'title')
+ page, 'title')
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, playlist_title)
class YoutubeTopListIE(YoutubePlaylistIE):
- IE_NAME = u'youtube:toplist'
- IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
- u' (Example: "yttoplist:music:Top Tracks")')
+ IE_NAME = 'youtube:toplist'
+ IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
+ ' (Example: "yttoplist:music:Top Tracks")')
_VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
+ _TESTS = [{
+ 'url': 'yttoplist:music:Trending',
+ 'playlist_mincount': 5,
+ 'skip': 'Only works for logged-in users',
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel = mobj.group('chann')
title = mobj.group('title')
query = compat_urllib_parse.urlencode({'title': title})
- playlist_re = 'href="([^"]+?%s.*?)"' % re.escape(query)
- channel_page = self._download_webpage('https://www.youtube.com/%s' % channel, title)
- link = self._html_search_regex(playlist_re, channel_page, u'list')
+ channel_page = self._download_webpage(
+ 'https://www.youtube.com/%s' % channel, title)
+ link = self._html_search_regex(
+ r'''(?x)
+ <a\s+href="([^"]+)".*?>\s*
+ <span\s+class="branded-page-module-title-text">\s*
+ <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
+ channel_page, 'list')
url = compat_urlparse.urljoin('https://www.youtube.com/', link)
video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
# sometimes the webpage doesn't contain the videos
# retry until we get them
for i in itertools.count(0):
- msg = u'Downloading Youtube mix'
+ msg = 'Downloading Youtube mix'
if i > 0:
msg += ', retry #%d' % i
+
webpage = self._download_webpage(url, title, msg)
ids = orderedSet(re.findall(video_re, webpage))
if ids:
class YoutubeChannelIE(InfoExtractor):
- IE_DESC = u'YouTube.com channels'
+ IE_DESC = 'YouTube.com channels'
_VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
_MORE_PAGES_INDICATOR = 'yt-uix-load-more'
_MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
- IE_NAME = u'youtube:channel'
+ IE_NAME = 'youtube:channel'
+ _TESTS = [{
+ 'note': 'paginated channel',
+ 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
+ 'playlist_mincount': 91,
+ }]
def extract_videos_from_page(self, page):
ids_in_page = []
# Extract channel id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError('Invalid URL: %s' % url)
# Download channel page
channel_id = mobj.group(1)
for pagenum in itertools.count(1):
url = self._MORE_PAGES_URL % (pagenum, channel_id)
page = self._download_json(
- url, channel_id, note=u'Downloading page #%s' % pagenum,
+ url, channel_id, note='Downloading page #%s' % pagenum,
transform_source=uppercase_escape)
ids_in_page = self.extract_videos_from_page(page['content_html'])
if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
break
- self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+ self._downloader.to_screen('[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in video_ids]
class YoutubeUserIE(InfoExtractor):
- IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
+ IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
- IE_NAME = u'youtube:user'
+ IE_NAME = 'youtube:user'
+
+ _TESTS = [{
+ 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
+ 'playlist_mincount': 320,
+ 'info_dict': {
+ 'title': 'TheLinuxFoundation',
+ }
+ }, {
+ 'url': 'ytuser:phihag',
+ 'only_matching': True,
+ }]
@classmethod
def suitable(cls, url):
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
+ raise ExtractorError('Invalid URL: %s' % url)
username = mobj.group(1)
gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
page = self._download_webpage(
gdata_url, username,
- u'Downloading video ids from %d to %d' % (
+ 'Downloading video ids from %d to %d' % (
start_index, start_index + self._GDATA_PAGE_SIZE))
try:
response = json.loads(page)
except ValueError as err:
- raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
+ raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
if 'entry' not in response['feed']:
return
'id': video_id,
'title': title,
}
- url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)
+ url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
return self.playlist_result(url_results, playlist_title=username)
class YoutubeSearchIE(SearchInfoExtractor):
- IE_DESC = u'YouTube.com searches'
- _API_URL = u'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+ IE_DESC = 'YouTube.com searches'
+ _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_MAX_RESULTS = 1000
- IE_NAME = u'youtube:search'
+ IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
def _get_n_results(self, query, n):
result_url = self._API_URL % (
compat_urllib_parse.quote_plus(query.encode('utf-8')),
(PAGE_SIZE * pagenum) + 1)
data_json = self._download_webpage(
- result_url, video_id=u'query "%s"' % query,
- note=u'Downloading page %s' % (pagenum + 1),
- errnote=u'Unable to download API page')
+ result_url, video_id='query "%s"' % query,
+ note='Downloading page %s' % (pagenum + 1),
+ errnote='Unable to download API page')
data = json.loads(data_json)
api_response = data['data']
if 'items' not in api_response:
raise ExtractorError(
- u'[youtube] No video results', expected=True)
+ '[youtube] No video results', expected=True)
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
_SEARCH_KEY = 'ytsearchdate'
- IE_DESC = u'YouTube.com searches, newest videos first'
+ IE_DESC = 'YouTube.com searches, newest videos first'
class YoutubeSearchURLIE(InfoExtractor):
- IE_DESC = u'YouTube.com search URLs'
- IE_NAME = u'youtube:search_url'
+ IE_DESC = 'YouTube.com search URLs'
+ IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
+ _TESTS = [{
+ 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
+ 'playlist_mincount': 5,
+ 'info_dict': {
+ 'title': 'youtube-dl test video',
+ }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
- r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')
+ r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
class YoutubeShowIE(InfoExtractor):
- IE_DESC = u'YouTube.com (multi-season) shows'
- _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
- IE_NAME = u'youtube:show'
+ IE_DESC = 'YouTube.com (multi-season) shows'
+ _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
+ IE_NAME = 'youtube:show'
+ _TESTS = [{
+ 'url': 'http://www.youtube.com/show/airdisasters',
+ 'playlist_mincount': 3,
+ 'info_dict': {
+ 'id': 'airdisasters',
+ 'title': 'Air Disasters',
+ }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- show_name = mobj.group(1)
- webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
+ playlist_id = mobj.group('id')
+ webpage = self._download_webpage(
+ url, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
- self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
- return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons]
+ self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
+ entries = [
+ self.url_result(
+ 'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
+ for season in m_seasons
+ ]
+ title = self._og_search_title(webpage, fatal=False)
+
+ return {
+ '_type': 'playlist',
+ 'id': playlist_id,
+ 'title': title,
+ 'entries': entries,
+ }
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
@property
def IE_NAME(self):
- return u'youtube:%s' % self._FEED_NAME
+ return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
paging = 0
for i in itertools.count(1):
info = self._download_json(self._FEED_TEMPLATE % paging,
- u'%s feed' % self._FEED_NAME,
- u'Downloading page %s' % i)
+ '%s feed' % self._FEED_NAME,
+ 'Downloading page %s' % i)
feed_html = info.get('feed_html') or info.get('content_html')
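+ # Newer feed responses put the paging link in a separate
+ # load_more_widget_html blob; fall back to the feed HTML for older ones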
+ load_more_widget_html = info.get('load_more_widget_html') or feed_html
m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
ids = orderedSet(m.group(1) for m in m_ids)
feed_entries.extend(
for video_id in ids)
mobj = re.search(
r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
- feed_html)
+ load_more_widget_html)
if mobj is None:
break
paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
-class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
- _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
- _FEED_NAME = 'subscriptions'
- _PLAYLIST_TITLE = u'Youtube Subscriptions'
-
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
+ IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
- _PLAYLIST_TITLE = u'Youtube Recommended videos'
+ _PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
+ IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
_FEED_NAME = 'watch_later'
- _PLAYLIST_TITLE = u'Youtube Watch Later'
+ _PLAYLIST_TITLE = 'Youtube Watch Later'
_PERSONAL_FEED = True
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
- _VALID_URL = u'https?://www\.youtube\.com/feed/history|:ythistory'
+ IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
+ _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PERSONAL_FEED = True
- _PLAYLIST_TITLE = u'Youtube Watch History'
+ _PLAYLIST_TITLE = 'Youtube Watch History'
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
- IE_NAME = u'youtube:favorites'
- IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
+ IE_NAME = 'youtube:favorites'
+ IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
- playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
+ playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
+class YoutubeSubscriptionsIE(YoutubePlaylistIE):
+ IE_NAME = 'youtube:subscriptions'
+ IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
+ _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
+ _TESTS = []
+
+ def _real_extract(self, url):
+ title = 'Youtube Subscriptions'
+ page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
+
+ # The extraction process is the same as for playlists, but the regex
+ # for the video ids doesn't contain an index
+ ids = []
+ more_widget_html = content_html = page
+
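+ # Emulate the "Load more" button: keep following the
+ # data-uix-load-more-href link until it disappears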
+ for page_num in itertools.count(1):
+ matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
+ new_ids = orderedSet(matches)
+ ids.extend(new_ids)
+
+ mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+ if not mobj:
+ break
+
+ more = self._download_json(
+ 'https://youtube.com/%s' % mobj.group('more'), title,
+ 'Downloading page #%s' % page_num,
+ transform_source=uppercase_escape)
+ content_html = more['content_html']
+ more_widget_html = more['load_more_widget_html']
+
+ return {
+ '_type': 'playlist',
+ 'title': title,
+ 'entries': self._ids_to_results(ids),
+ }
+
+
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
def _real_extract(self, url):
raise ExtractorError(
- u'Did you forget to quote the URL? Remember that & is a meta '
- u'character in most shells, so you want to put the URL in quotes, '
- u'like youtube-dl '
- u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
- u' or simply youtube-dl BaW_jenozKc .',
+ 'Did you forget to quote the URL? Remember that & is a meta '
+ 'character in most shells, so you want to put the URL in quotes, '
+ 'like youtube-dl '
+ '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
+ ' or simply youtube-dl BaW_jenozKc .',
expected=True)
)
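+# Factored out of ZDFIE (below) so the beitragsDetails XML parsing can be
+# reused by other extractors.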
+def extract_from_xml_url(ie, video_id, xml_url):
+ doc = ie._download_xml(
+ xml_url, video_id,
+ note='Downloading video info',
+ errnote='Failed to download video info')
+
+ title = doc.find('.//information/title').text
+ description = doc.find('.//information/detail').text
+ duration = int(doc.find('.//details/lengthSec').text)
+ uploader_node = doc.find('.//details/originChannelTitle')
+ uploader = None if uploader_node is None else uploader_node.text
+ uploader_id_node = doc.find('.//details/originChannelId')
+ uploader_id = None if uploader_id_node is None else uploader_id_node.text
+ upload_date = unified_strdate(doc.find('.//details/airtime').text)
+
+ def xml_to_format(fnode):
+ video_url = fnode.find('url').text
+ is_available = 'http://www.metafilegenerator' not in video_url
+
+ format_id = fnode.attrib['basetype']
+ format_m = re.match(r'''(?x)
+ (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
+ (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
+ ''', format_id)
+
+ ext = format_m.group('container')
+ proto = format_m.group('proto').lower()
+
+ quality = fnode.find('./quality').text
+ abr = int(fnode.find('./audioBitrate').text) // 1000
+ vbr_node = fnode.find('./videoBitrate')
+ vbr = None if vbr_node is None else int(vbr_node.text) // 1000
+
+ width_node = fnode.find('./width')
+ width = None if width_node is None else int_or_none(width_node.text)
+ height_node = fnode.find('./height')
+ height = None if height_node is None else int_or_none(height_node.text)
+
+ format_note = ''
+ if not format_note:
+ format_note = None
+
+ return {
+ 'format_id': format_id + '-' + quality,
+ 'url': video_url,
+ 'ext': ext,
+ 'acodec': format_m.group('acodec'),
+ 'vcodec': format_m.group('vcodec'),
+ 'abr': abr,
+ 'vbr': vbr,
+ 'width': width,
+ 'height': height,
+ 'filesize': int_or_none(fnode.find('./filesize').text),
+ 'format_note': format_note,
+ 'protocol': proto,
+ '_available': is_available,
+ }
+
+ format_nodes = doc.findall('.//formitaeten/formitaet')
+ formats = list(filter(
+ lambda f: f['_available'],
+ map(xml_to_format, format_nodes)))
+ ie._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'upload_date': upload_date,
+ 'formats': formats,
+ }
+
+
class ZDFIE(InfoExtractor):
- _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<video_id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
+ _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
_TEST = {
'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('video_id')
+ video_id = self._match_id(url)
xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
- doc = self._download_xml(
- xml_url, video_id,
- note='Downloading video info',
- errnote='Failed to download video info')
-
- title = doc.find('.//information/title').text
- description = doc.find('.//information/detail').text
- duration = int(doc.find('.//details/lengthSec').text)
- uploader_node = doc.find('.//details/originChannelTitle')
- uploader = None if uploader_node is None else uploader_node.text
- uploader_id_node = doc.find('.//details/originChannelId')
- uploader_id = None if uploader_id_node is None else uploader_id_node.text
- upload_date = unified_strdate(doc.find('.//details/airtime').text)
-
- def xml_to_format(fnode):
- video_url = fnode.find('url').text
- is_available = 'http://www.metafilegenerator' not in video_url
-
- format_id = fnode.attrib['basetype']
- format_m = re.match(r'''(?x)
- (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
- (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
- ''', format_id)
-
- ext = format_m.group('container')
- proto = format_m.group('proto').lower()
-
- quality = fnode.find('./quality').text
- abr = int(fnode.find('./audioBitrate').text) // 1000
- vbr_node = fnode.find('./videoBitrate')
- vbr = None if vbr_node is None else int(vbr_node.text) // 1000
-
- width_node = fnode.find('./width')
- width = None if width_node is None else int_or_none(width_node.text)
- height_node = fnode.find('./height')
- height = None if height_node is None else int_or_none(height_node.text)
-
- format_note = ''
- if not format_note:
- format_note = None
-
- return {
- 'format_id': format_id + '-' + quality,
- 'url': video_url,
- 'ext': ext,
- 'acodec': format_m.group('acodec'),
- 'vcodec': format_m.group('vcodec'),
- 'abr': abr,
- 'vbr': vbr,
- 'width': width,
- 'height': height,
- 'filesize': int_or_none(fnode.find('./filesize').text),
- 'format_note': format_note,
- 'protocol': proto,
- '_available': is_available,
- }
-
- format_nodes = doc.findall('.//formitaeten/formitaet')
- formats = list(filter(
- lambda f: f['_available'],
- map(xml_to_format, format_nodes)))
-
- self._sort_formats(formats)
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'upload_date': upload_date,
- 'formats': formats,
- }
\ No newline at end of file
+ return extract_from_xml_url(self, video_id, xml_url)
--- /dev/null
+from __future__ import unicode_literals
+
+import os.path
+import optparse
+import shlex
+import sys
+
+from .utils import (
+ compat_expanduser,
+ compat_getenv,
+ get_term_width,
+ write_string,
+)
+from .version import __version__
+
+
+def parseOpts(overrideArguments=None):
+ def _readOptions(filename_bytes, default=[]):
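+ # Each non-comment line of the file is split with shell-like rules
+ # (shlex) and appended to the option list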
+ try:
+ optionf = open(filename_bytes)
+ except IOError:
+ return default # silently skip if file is not present
+ try:
+ res = []
+ for l in optionf:
+ res += shlex.split(l, comments=True)
+ finally:
+ optionf.close()
+ return res
+
+ def _readUserConf():
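+ # Search XDG locations first ($XDG_CONFIG_HOME or ~/.config), then
+ # %APPDATA% on Windows, then the legacy ~/youtube-dl.conf files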
+ xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
+ if xdg_config_home:
+ userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
+ if not os.path.isfile(userConfFile):
+ userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
+ else:
+ userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
+ if not os.path.isfile(userConfFile):
+ userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
+ userConf = _readOptions(userConfFile, None)
+
+ if userConf is None:
+ appdata_dir = compat_getenv('appdata')
+ if appdata_dir:
+ userConf = _readOptions(
+ os.path.join(appdata_dir, 'youtube-dl', 'config'),
+ default=None)
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
+ default=None)
+
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
+ default=None)
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
+ default=None)
+
+ if userConf is None:
+ userConf = []
+
+ return userConf
+
+ def _format_option_string(option):
+ ''' ('-o', '--option') -> -o, --option METAVAR'''
+
+ opts = []
+
+ if option._short_opts:
+ opts.append(option._short_opts[0])
+ if option._long_opts:
+ opts.append(option._long_opts[0])
+ if len(opts) > 1:
+ opts.insert(1, ', ')
+
+ if option.takes_value():
+ opts.append(' %s' % option.metavar)
+
+ return "".join(opts)
+
+ def _comma_separated_values_options_callback(option, opt_str, value, parser):
+ setattr(parser.values, option.dest, value.split(','))
+
+ def _hide_login_info(opts):
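+ # Replace the value following each credential option with PRIVATE so
+ # that verbose output does not leak secrets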
+ opts = list(opts)
+ for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
+ try:
+ i = opts.index(private_opt)
+ opts[i + 1] = 'PRIVATE'
+ except ValueError:
+ pass
+ return opts
+
+ # No need to wrap help messages if we're on a wide console
+ columns = get_term_width()
+ max_width = columns if columns else 80
+ max_help_position = 80
+
+ fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+ fmt.format_option_strings = _format_option_string
+
+ kw = {
+ 'version': __version__,
+ 'formatter': fmt,
+ 'usage': '%prog [options] url [url...]',
+ 'conflict_handler': 'resolve',
+ }
+
+ parser = optparse.OptionParser(**kw)
+
+ general = optparse.OptionGroup(parser, 'General Options')
+ general.add_option(
+ '-h', '--help',
+ action='help',
+ help='print this help text and exit')
+ general.add_option(
+ '-v', '--version',
+ action='version',
+ help='print program version and exit')
+ general.add_option(
+ '-U', '--update',
+ action='store_true', dest='update_self',
+ help='update this program to the latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
+ general.add_option(
+ '-i', '--ignore-errors',
+ action='store_true', dest='ignoreerrors', default=False,
+ help='continue on download errors, for example to skip unavailable videos in a playlist')
+ general.add_option(
+ '--abort-on-error',
+ action='store_false', dest='ignoreerrors',
+ help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
+ general.add_option(
+ '--dump-user-agent',
+ action='store_true', dest='dump_user_agent', default=False,
+ help='display the current browser identification')
+ general.add_option(
+ '--list-extractors',
+ action='store_true', dest='list_extractors', default=False,
+ help='List all supported extractors and the URLs they would handle')
+ general.add_option(
+ '--extractor-descriptions',
+ action='store_true', dest='list_extractor_descriptions', default=False,
+ help='Output descriptions of all supported extractors')
+ general.add_option(
+ '--proxy', dest='proxy',
+ default=None, metavar='URL',
+ help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
+ general.add_option(
+ '--socket-timeout',
+ dest='socket_timeout', type=float, default=None,
+ help='Time to wait before giving up, in seconds')
+ general.add_option(
+ '--default-search',
+ dest='default_search', metavar='PREFIX',
+ help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
+ general.add_option(
+ '--ignore-config',
+ action='store_true',
+ help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
+ general.add_option(
+ '--flat-playlist',
+ action='store_const', dest='extract_flat', const='in_playlist',
+ default=False,
+ help='Do not extract the videos of a playlist, only list them.')
+
+ selection = optparse.OptionGroup(parser, 'Video Selection')
+ selection.add_option(
+ '--playlist-start',
+ dest='playliststart', metavar='NUMBER', default=1, type=int,
+ help='playlist video to start at (default is %default)')
+ selection.add_option(
+ '--playlist-end',
+ dest='playlistend', metavar='NUMBER', default=None, type=int,
+ help='playlist video to end at (default is last)')
+ selection.add_option(
+ '--match-title',
+ dest='matchtitle', metavar='REGEX',
+ help='download only matching titles (regex or caseless sub-string)')
+ selection.add_option(
+ '--reject-title',
+ dest='rejecttitle', metavar='REGEX',
+ help='skip download for matching titles (regex or caseless sub-string)')
+ selection.add_option(
+ '--max-downloads',
+ dest='max_downloads', metavar='NUMBER', type=int, default=None,
+ help='Abort after downloading NUMBER files')
+ selection.add_option(
+ '--min-filesize',
+ metavar='SIZE', dest='min_filesize', default=None,
+ help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
+ selection.add_option(
+ '--max-filesize',
+ metavar='SIZE', dest='max_filesize', default=None,
+ help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
+ selection.add_option(
+ '--date',
+ metavar='DATE', dest='date', default=None,
+ help='download only videos uploaded on this date')
+ selection.add_option(
+ '--datebefore',
+ metavar='DATE', dest='datebefore', default=None,
+ help='download only videos uploaded on or before this date (i.e. inclusive)')
+ selection.add_option(
+ '--dateafter',
+ metavar='DATE', dest='dateafter', default=None,
+ help='download only videos uploaded on or after this date (i.e. inclusive)')
+ selection.add_option(
+ '--min-views',
+ metavar='COUNT', dest='min_views', default=None, type=int,
+ help='Do not download any videos with less than COUNT views',)
+ selection.add_option(
+ '--max-views',
+ metavar='COUNT', dest='max_views', default=None, type=int,
+ help='Do not download any videos with more than COUNT views')
+ selection.add_option(
+ '--no-playlist',
+ action='store_true', dest='noplaylist', default=False,
+ help='download only the currently playing video')
+ selection.add_option(
+ '--age-limit',
+ metavar='YEARS', dest='age_limit', default=None, type=int,
+ help='download only videos suitable for the given age')
+ selection.add_option(
+ '--download-archive', metavar='FILE',
+ dest='download_archive',
+ help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
+ selection.add_option(
+ '--include-ads',
+ dest='include_ads', action='store_true',
+ help='Download advertisements as well (experimental)')
+
+ authentication = optparse.OptionGroup(parser, 'Authentication Options')
+ authentication.add_option(
+ '-u', '--username',
+ dest='username', metavar='USERNAME',
+ help='login with this account ID')
+ authentication.add_option(
+ '-p', '--password',
+ dest='password', metavar='PASSWORD',
+ help='account password')
+ authentication.add_option(
+ '-2', '--twofactor',
+ dest='twofactor', metavar='TWOFACTOR',
+ help='two-factor auth code')
+ authentication.add_option(
+ '-n', '--netrc',
+ action='store_true', dest='usenetrc', default=False,
+ help='use .netrc authentication data')
+ authentication.add_option(
+ '--video-password',
+ dest='videopassword', metavar='PASSWORD',
+ help='video password (vimeo, smotri)')
+
+ video_format = optparse.OptionGroup(parser, 'Video Format Options')
+ video_format.add_option(
+ '-f', '--format',
+ action='store', dest='format', metavar='FORMAT', default=None,
+ help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
+ video_format.add_option(
+ '--all-formats',
+ action='store_const', dest='format', const='all',
+ help='download all available video formats')
+ video_format.add_option(
+ '--prefer-free-formats',
+ action='store_true', dest='prefer_free_formats', default=False,
+ help='prefer free video formats unless a specific one is requested')
+ video_format.add_option(
+ '--max-quality',
+ action='store', dest='format_limit', metavar='FORMAT',
+ help='highest quality format to download')
+ video_format.add_option(
+ '-F', '--list-formats',
+ action='store_true', dest='listformats',
+ help='list all available formats')
+ video_format.add_option(
+ '--youtube-include-dash-manifest',
+ action='store_true', dest='youtube_include_dash_manifest', default=True,
+ help=optparse.SUPPRESS_HELP)
+ video_format.add_option(
+ '--youtube-skip-dash-manifest',
+ action='store_false', dest='youtube_include_dash_manifest',
+ help='Do not download the DASH manifest on YouTube videos')
+
+ subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
+ subtitles.add_option(
+ '--write-sub', '--write-srt',
+ action='store_true', dest='writesubtitles', default=False,
+ help='write subtitle file')
+ subtitles.add_option(
+ '--write-auto-sub', '--write-automatic-sub',
+ action='store_true', dest='writeautomaticsub', default=False,
+ help='write automatic subtitle file (youtube only)')
+ subtitles.add_option(
+ '--all-subs',
+ action='store_true', dest='allsubtitles', default=False,
+ help='downloads all the available subtitles of the video')
+ subtitles.add_option(
+ '--list-subs',
+ action='store_true', dest='listsubtitles', default=False,
+ help='lists all available subtitles for the video')
+ subtitles.add_option(
+ '--sub-format',
+ action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
+ help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
+ subtitles.add_option(
+ '--sub-lang', '--sub-langs', '--srt-lang',
+ action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+ default=[], callback=_comma_separated_values_options_callback,
+ help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
+
+ downloader = optparse.OptionGroup(parser, 'Download Options')
+ downloader.add_option(
+ '-r', '--rate-limit',
+ dest='ratelimit', metavar='LIMIT',
+ help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+ downloader.add_option(
+ '-R', '--retries',
+ dest='retries', metavar='RETRIES', default=10,
+ help='number of retries (default is %default)')
+ downloader.add_option(
+ '--buffer-size',
+ dest='buffersize', metavar='SIZE', default='1024',
+ help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
+ downloader.add_option(
+ '--no-resize-buffer',
+ action='store_true', dest='noresizebuffer', default=False,
+ help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
+ downloader.add_option(
+ '--test',
+ action='store_true', dest='test', default=False,
+ help=optparse.SUPPRESS_HELP)
+
+ workarounds = optparse.OptionGroup(parser, 'Workarounds')
+ workarounds.add_option(
+ '--encoding',
+ dest='encoding', metavar='ENCODING',
+ help='Force the specified encoding (experimental)')
+ workarounds.add_option(
+ '--no-check-certificate',
+ action='store_true', dest='no_check_certificate', default=False,
+ help='Suppress HTTPS certificate validation.')
+ workarounds.add_option(
+ '--prefer-insecure',
+ '--prefer-unsecure', action='store_true', dest='prefer_insecure',
+ help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
+ workarounds.add_option(
+ '--user-agent',
+ metavar='UA', dest='user_agent',
+ help='specify a custom user agent')
+ workarounds.add_option(
+ '--referer',
+ metavar='URL', dest='referer', default=None,
+ help='specify a custom referer, use if the video access is restricted to one domain',
+ )
+ workarounds.add_option(
+ '--add-header',
+ metavar='FIELD:VALUE', dest='headers', action='append',
+ help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
+ )
+ workarounds.add_option(
+ '--bidi-workaround',
+ dest='bidi_workaround', action='store_true',
+ help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+
+ verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+ verbosity.add_option(
+ '-q', '--quiet',
+ action='store_true', dest='quiet', default=False,
+ help='activates quiet mode')
+ verbosity.add_option(
+ '--no-warnings',
+ dest='no_warnings', action='store_true', default=False,
+ help='Ignore warnings')
+ verbosity.add_option(
+ '-s', '--simulate',
+ action='store_true', dest='simulate', default=False,
+ help='do not download the video and do not write anything to disk',)
+ verbosity.add_option(
+ '--skip-download',
+ action='store_true', dest='skip_download', default=False,
+ help='do not download the video',)
+ verbosity.add_option(
+ '-g', '--get-url',
+ action='store_true', dest='geturl', default=False,
+ help='simulate, quiet but print URL')
+ verbosity.add_option(
+ '-e', '--get-title',
+ action='store_true', dest='gettitle', default=False,
+ help='simulate, quiet but print title')
+ verbosity.add_option(
+ '--get-id',
+ action='store_true', dest='getid', default=False,
+ help='simulate, quiet but print id')
+ verbosity.add_option(
+ '--get-thumbnail',
+ action='store_true', dest='getthumbnail', default=False,
+ help='simulate, quiet but print thumbnail URL')
+ verbosity.add_option(
+ '--get-description',
+ action='store_true', dest='getdescription', default=False,
+ help='simulate, quiet but print video description')
+ verbosity.add_option(
+ '--get-duration',
+ action='store_true', dest='getduration', default=False,
+ help='simulate, quiet but print video length')
+ verbosity.add_option(
+ '--get-filename',
+ action='store_true', dest='getfilename', default=False,
+ help='simulate, quiet but print output filename')
+ verbosity.add_option(
+ '--get-format',
+ action='store_true', dest='getformat', default=False,
+ help='simulate, quiet but print output format')
+ verbosity.add_option(
+ '-j', '--dump-json',
+ action='store_true', dest='dumpjson', default=False,
+ help='simulate, quiet but print JSON information. See --output for a description of available keys.')
+ verbosity.add_option(
+ '-J', '--dump-single-json',
+ action='store_true', dest='dump_single_json', default=False,
+ help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
+ verbosity.add_option(
+ '--newline',
+ action='store_true', dest='progress_with_newline', default=False,
+ help='output progress bar as new lines')
+ verbosity.add_option(
+ '--no-progress',
+ action='store_true', dest='noprogress', default=False,
+ help='do not print progress bar')
+ verbosity.add_option(
+ '--console-title',
+ action='store_true', dest='consoletitle', default=False,
+ help='display progress in console titlebar')
+ verbosity.add_option(
+ '-v', '--verbose',
+ action='store_true', dest='verbose', default=False,
+ help='print various debugging information')
+ verbosity.add_option(
+ '--dump-intermediate-pages',
+ action='store_true', dest='dump_intermediate_pages', default=False,
+ help='print downloaded pages to debug problems (very verbose)')
+ verbosity.add_option(
+ '--write-pages',
+ action='store_true', dest='write_pages', default=False,
+ help='Write downloaded intermediary pages to files in the current directory to debug problems')
+ verbosity.add_option(
+ '--youtube-print-sig-code',
+ action='store_true', dest='youtube_print_sig_code', default=False,
+ help=optparse.SUPPRESS_HELP)
+ verbosity.add_option(
+ '--print-traffic',
+ dest='debug_printtraffic', action='store_true', default=False,
+ help='Display sent and read HTTP traffic')
+
+ filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+ filesystem.add_option(
+ '-a', '--batch-file',
+ dest='batchfile', metavar='FILE',
+ help='file containing URLs to download (\'-\' for stdin)')
+ filesystem.add_option(
+ '--id', default=False,
+ action='store_true', dest='useid', help='use only video ID in file name')
+ filesystem.add_option(
+ '-A', '--auto-number',
+ action='store_true', dest='autonumber', default=False,
+ help='number downloaded files starting from 00000')
+ filesystem.add_option(
+ '-o', '--output',
+ dest='outtmpl', metavar='TEMPLATE',
+ help=('output filename template. Use %(title)s to get the title, '
+ '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+ '%(autonumber)s to get an automatically incremented number, '
+ '%(ext)s for the filename extension, '
+ '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+ '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
+ '%(upload_date)s for the upload date (YYYYMMDD), '
+ '%(extractor)s for the provider (youtube, metacafe, etc), '
+ '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
+ '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
+ '%(height)s and %(width)s for the width and height of the video format. '
+ '%(resolution)s for a textual description of the resolution of the video format. '
+ 'Use - to output to stdout. Can also be used to download to a different directory, '
+ 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
+ filesystem.add_option(
+ '--autonumber-size',
+ dest='autonumber_size', metavar='NUMBER',
+ help='Specifies the number of digits in %(autonumber)s when it is present in the output filename template or the --auto-number option is given')
+ filesystem.add_option(
+ '--restrict-filenames',
+ action='store_true', dest='restrictfilenames', default=False,
+ help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
+ filesystem.add_option(
+ '-t', '--title',
+ action='store_true', dest='usetitle', default=False,
+ help='[deprecated] use title in file name (default)')
+ filesystem.add_option(
+ '-l', '--literal', default=False,
+ action='store_true', dest='usetitle',
+ help='[deprecated] alias of --title')
+ filesystem.add_option(
+ '-w', '--no-overwrites',
+ action='store_true', dest='nooverwrites', default=False,
+ help='do not overwrite files')
+ filesystem.add_option(
+ '-c', '--continue',
+ action='store_true', dest='continue_dl', default=True,
+ help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
+ filesystem.add_option(
+ '--no-continue',
+ action='store_false', dest='continue_dl',
+ help='do not resume partially downloaded files (restart from beginning)')
+ filesystem.add_option(
+ '--no-part',
+ action='store_true', dest='nopart', default=False,
+ help='do not use .part files - write directly into output file')
+ filesystem.add_option(
+ '--no-mtime',
+ action='store_false', dest='updatetime', default=True,
+ help='do not use the Last-modified header to set the file modification time')
+ filesystem.add_option(
+ '--write-description',
+ action='store_true', dest='writedescription', default=False,
+ help='write video description to a .description file')
+ filesystem.add_option(
+ '--write-info-json',
+ action='store_true', dest='writeinfojson', default=False,
+ help='write video metadata to a .info.json file')
+ filesystem.add_option(
+ '--write-annotations',
+ action='store_true', dest='writeannotations', default=False,
+ help='write video annotations to a .annotation file')
+ filesystem.add_option(
+ '--write-thumbnail',
+ action='store_true', dest='writethumbnail', default=False,
+ help='write thumbnail image to disk')
+ filesystem.add_option(
+ '--load-info',
+ dest='load_info_filename', metavar='FILE',
+ help='json file containing the video information (created with the "--write-info-json" option)')
+ filesystem.add_option(
+ '--cookies',
+ dest='cookiefile', metavar='FILE',
+ help='file to read cookies from and dump cookie jar in')
+ filesystem.add_option(
+ '--cache-dir', dest='cachedir', default=None, metavar='DIR',
+ help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
+ filesystem.add_option(
+ '--no-cache-dir', action='store_const', const=False, dest='cachedir',
+ help='Disable filesystem caching')
+ filesystem.add_option(
+ '--rm-cache-dir',
+ action='store_true', dest='rm_cachedir',
+ help='Delete all filesystem cache files')
+
+ postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+ postproc.add_option(
+ '-x', '--extract-audio',
+ action='store_true', dest='extractaudio', default=False,
+ help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+ postproc.add_option(
+ '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+ help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
+ postproc.add_option(
+ '--audio-quality', metavar='QUALITY',
+ dest='audioquality', default='5',
+ help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
+ postproc.add_option(
+ '--recode-video',
+ metavar='FORMAT', dest='recodevideo', default=None,
+ help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
+ postproc.add_option(
+ '-k', '--keep-video',
+ action='store_true', dest='keepvideo', default=False,
+ help='keeps the video file on disk after the post-processing; the video is erased by default')
+ postproc.add_option(
+ '--no-post-overwrites',
+ action='store_true', dest='nopostoverwrites', default=False,
+ help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+ postproc.add_option(
+ '--embed-subs',
+ action='store_true', dest='embedsubtitles', default=False,
+ help='embed subtitles in the video (only for mp4 videos)')
+ postproc.add_option(
+ '--embed-thumbnail',
+ action='store_true', dest='embedthumbnail', default=False,
+ help='embed thumbnail in the audio as cover art')
+ postproc.add_option(
+ '--add-metadata',
+ action='store_true', dest='addmetadata', default=False,
+ help='write metadata to the video file')
+ postproc.add_option(
+ '--xattrs',
+ action='store_true', dest='xattrs', default=False,
+ help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+ postproc.add_option(
+ '--prefer-avconv',
+ action='store_false', dest='prefer_ffmpeg',
+ help='Prefer avconv over ffmpeg for running the postprocessors (default)')
+ postproc.add_option(
+ '--prefer-ffmpeg',
+ action='store_true', dest='prefer_ffmpeg',
+ help='Prefer ffmpeg over avconv for running the postprocessors')
+ postproc.add_option(
+ '--exec',
+ metavar='CMD', dest='exec_cmd',
+ help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
+
+ parser.add_option_group(general)
+ parser.add_option_group(selection)
+ parser.add_option_group(downloader)
+ parser.add_option_group(filesystem)
+ parser.add_option_group(verbosity)
+ parser.add_option_group(workarounds)
+ parser.add_option_group(video_format)
+ parser.add_option_group(subtitles)
+ parser.add_option_group(authentication)
+ parser.add_option_group(postproc)
+
+ if overrideArguments is not None:
+ opts, args = parser.parse_args(overrideArguments)
+ if opts.verbose:
+ write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
+ else:
+ commandLineConf = sys.argv[1:]
+ if '--ignore-config' in commandLineConf:
+ systemConf = []
+ userConf = []
+ else:
+ systemConf = _readOptions('/etc/youtube-dl.conf')
+ if '--ignore-config' in systemConf:
+ userConf = []
+ else:
+ userConf = _readUserConf()
+ argv = systemConf + userConf + commandLineConf
+
+ opts, args = parser.parse_args(argv)
+ if opts.verbose:
+ write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+ write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+ write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+
+ return parser, opts, args
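
The layering above makes precedence fall out of optparse itself: because later arguments override earlier ones, appending `commandLineConf` after `systemConf` and `userConf` lets the command line win over both config files. A minimal standalone sketch of that rule (the option is real, the values are made up):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--audio-format', dest='audioformat', default='best')

    system_conf = ['--audio-format', 'mp3']    # e.g. read from /etc/youtube-dl.conf
    command_line = ['--audio-format', 'opus']  # what the user actually typed

    opts, _ = parser.parse_args(system_conf + command_line)
    print(opts.audioformat)  # 'opus': the last occurrence wins
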
from .atomicparsley import AtomicParsleyPP
from .ffmpeg import (
+ FFmpegPostProcessor,
FFmpegAudioFixPP,
+ FFmpegEmbedSubtitlePP,
+ FFmpegExtractAudioPP,
FFmpegMergerPP,
FFmpegMetadataPP,
FFmpegVideoConvertor,
- FFmpegExtractAudioPP,
- FFmpegEmbedSubtitlePP,
)
from .xattrpp import XAttrMetadataPP
+from .execafterdownload import ExecAfterDownloadPP
__all__ = [
'AtomicParsleyPP',
+ 'ExecAfterDownloadPP',
'FFmpegAudioFixPP',
+ 'FFmpegEmbedSubtitlePP',
+ 'FFmpegExtractAudioPP',
'FFmpegMergerPP',
'FFmpegMetadataPP',
+ 'FFmpegPostProcessor',
'FFmpegVideoConvertor',
- 'FFmpegExtractAudioPP',
- 'FFmpegEmbedSubtitlePP',
'XAttrMetadataPP',
]
--- /dev/null
+from __future__ import unicode_literals
+
+import subprocess
+
+from .common import PostProcessor
+from ..utils import (
+ shlex_quote,
+ PostProcessingError,
+)
+
+
+class ExecAfterDownloadPP(PostProcessor):
+ def __init__(self, downloader=None, verboseOutput=None, exec_cmd=None):
+ PostProcessor.__init__(self, downloader)  # sets self._downloader, used in run()
+ self.verboseOutput = verboseOutput
+ self.exec_cmd = exec_cmd
+
+ def run(self, information):
+ cmd = self.exec_cmd
+ if '{}' not in cmd:
+ cmd += ' {}'
+
+ cmd = cmd.replace('{}', shlex_quote(information['filepath']))
+
+ self._downloader.to_screen("[exec] Executing command: %s" % cmd)
+ retCode = subprocess.call(cmd, shell=True)
+ if retCode != 0:
+ raise PostProcessingError(
+ 'Command returned error code %d' % retCode)
+
+ return None, information # by default, keep file and do nothing
+
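A usage sketch of the template handling in `run()` above, using only the pieces this patch adds (the file path is hypothetical): if the command contains no `{}` placeholder, the quoted path is appended; otherwise every `{}` is replaced with the shell-quoted path.

    from youtube_dl.utils import shlex_quote

    cmd = 'adb push {} /sdcard/Music/ && rm {}'
    filepath = "My Video's Title.mp4"  # hypothetical downloaded file
    if '{}' not in cmd:
        cmd += ' {}'
    print(cmd.replace('{}', shlex_quote(filepath)))
    # adb push 'My Video'"'"'s Title.mp4' /sdcard/Music/ && rm 'My Video'"'"'s Title.mp4'
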
import os
+import re
import subprocess
import sys
import time
from .common import AudioConversionError, PostProcessor
from ..utils import (
- check_executable,
compat_subprocess_get_DEVNULL,
encodeArgument,
encodeFilename,
+ is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
)
+def get_version(executable):
+ """ Returns the version of the specified executable,
+ or False if the executable is not present """
+ try:
+ out, err = subprocess.Popen(
+ [executable, '-version'],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
+ except OSError:
+ return False
+ firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
+ m = re.search(r'version\s+([-0-9._a-zA-Z]+)', firstline)
+ if not m:
+ return u'present'
+ else:
+ return m.group(1)
+
+
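For reference, `ffmpeg -version` and `avconv -version` both start with a line of the form `<name> version <v> ...`, which is what the regex above picks apart (the hyphen must lead the character class, since `_-a` would otherwise form an accidental range). A standalone check against a typical first line; the exact banner varies between builds:

    import re

    firstline = 'ffmpeg version 2.4.2 Copyright (c) 2000-2014 the FFmpeg developers'
    m = re.search(r'version\s+([-0-9._a-zA-Z]+)', firstline)
    print(m.group(1))  # 2.4.2
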
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None, deletetempfiles=False):
PostProcessor.__init__(self, downloader)
- self._exes = self.detect_executables()
+ self._versions = self.get_versions()
self._deletetempfiles = deletetempfiles
+ def check_version(self):
+ if not self._executable:
+ raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
+
+ REQUIRED_VERSION = '1.0'
+ if is_outdated_version(
+ self._versions[self._executable], REQUIRED_VERSION):
+ warning = u'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
+ self._executable, self._executable, REQUIRED_VERSION)
+ if self._downloader:
+ self._downloader.report_warning(warning)
+
@staticmethod
- def detect_executables():
+ def get_versions():
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
- return dict((program, check_executable(program, ['-version'])) for program in programs)
+ return dict((program, get_version(program)) for program in programs)
+
+ @property
+ def _executable(self):
+ if self._downloader.params.get('prefer_ffmpeg', False):
+ prefs = ('ffmpeg', 'avconv')
+ else:
+ prefs = ('avconv', 'ffmpeg')
+ for p in prefs:
+ if self._versions[p]:
+ return p
+ return None
- def _get_executable(self):
+ @property
+ def _probe_executable(self):
if self._downloader.params.get('prefer_ffmpeg', False):
- return self._exes['ffmpeg'] or self._exes['avconv']
+ prefs = ('ffprobe', 'avprobe')
else:
- return self._exes['avconv'] or self._exes['ffmpeg']
+ prefs = ('avprobe', 'ffprobe')
+ for p in prefs:
+ if self._versions[p]:
+ return p
+ return None
def _uses_avconv(self):
- return self._get_executable() == self._exes['avconv']
+ return self._executable == 'avconv'
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
- if not self._get_executable():
- raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
+ self.check_version()
files_cmd = []
for path in input_paths:
files_cmd.extend(['-i', encodeFilename(path, True)])
- cmd = ([self._get_executable(), '-y'] + files_cmd
+ cmd = ([self._executable, '-y'] + files_cmd
+ [encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
self._nopostoverwrites = nopostoverwrites
def get_audio_codec(self, path):
- if not self._exes['ffprobe'] and not self._exes['avprobe']:
+ if not self._probe_executable:
raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
try:
cmd = [
- self._exes['avprobe'] or self._exes['ffprobe'],
+ self._probe_executable,
'-show_streams',
encodeFilename(self._ffmpeg_filename_argument(path), True)]
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
else:
- self._downloader.to_screen(u'[' + self._get_executable() + '] Destination: ' + new_path)
+ self._downloader.to_screen(u'[' + self._executable + '] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except:
etype,e,tb = sys.exc_info()
if isinstance(e, AudioConversionError):
msg = u'audio conversion failed: ' + e.msg
else:
- msg = u'error running ' + self._get_executable()
+ msg = u'error running ' + self._executable
raise PostProcessingError(msg)
# Try to update the date time for extracted audio file.
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
- args = ['-c', 'copy']
+ args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']
self._downloader.to_screen(u'[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
return True, info
import struct
import subprocess
import sys
+import tempfile
import traceback
import xml.etree.ElementTree
import zlib
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
+try:
+ from shlex import quote as shlex_quote
+except ImportError: # Python < 3.3
+ def shlex_quote(s):
+ return "'" + s.replace("'", "'\"'\"'") + "'"
+
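The fallback mirrors the classic POSIX quoting trick: wrap the string in single quotes and splice each embedded single quote in as '"'"'. A quick standalone check:

    def shlex_quote(s):
        return "'" + s.replace("'", "'\"'\"'") + "'"

    print(shlex_quote("it's here"))  # 'it'"'"'s here'
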
+
def compat_ord(c):
if type(c) is int: return c
else: return ord(c)
+
+if sys.version_info >= (3, 0):
+ compat_getenv = os.getenv
+ compat_expanduser = os.path.expanduser
+else:
+ # Environment variables should be decoded with filesystem encoding.
+ # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
+
+ def compat_getenv(key, default=None):
+ env = os.getenv(key, default)
+ if env:
+ env = env.decode(get_filesystem_encoding())
+ return env
+
+ # HACK: The default implementations of os.path.expanduser from cpython do not decode
+ # environment variables with filesystem encoding. We will work around this by
+ # providing adjusted implementations.
+ # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
+ # for different platforms with correct environment variables decoding.
+
+ if os.name == 'posix':
+ def compat_expanduser(path):
+ """Expand ~ and ~user constructions. If user or $HOME is unknown,
+ do nothing."""
+ if not path.startswith('~'):
+ return path
+ i = path.find('/', 1)
+ if i < 0:
+ i = len(path)
+ if i == 1:
+ if 'HOME' not in os.environ:
+ import pwd
+ userhome = pwd.getpwuid(os.getuid()).pw_dir
+ else:
+ userhome = compat_getenv('HOME')
+ else:
+ import pwd
+ try:
+ pwent = pwd.getpwnam(path[1:i])
+ except KeyError:
+ return path
+ userhome = pwent.pw_dir
+ userhome = userhome.rstrip('/')
+ return (userhome + path[i:]) or '/'
+ elif os.name == 'nt' or os.name == 'ce':
+ def compat_expanduser(path):
+ """Expand ~ and ~user constructs.
+
+ If user or $HOME is unknown, do nothing."""
+ if path[:1] != '~':
+ return path
+ i, n = 1, len(path)
+ while i < n and path[i] not in '/\\':
+ i = i + 1
+
+ if 'HOME' in os.environ:
+ userhome = compat_getenv('HOME')
+ elif 'USERPROFILE' in os.environ:
+ userhome = compat_getenv('USERPROFILE')
+ elif 'HOMEPATH' not in os.environ:
+ return path
+ else:
+ # compat_getenv returns None rather than raising KeyError,
+ # so default to '' when HOMEDRIVE is unset
+ drive = compat_getenv('HOMEDRIVE', '')
+ userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
+
+ if i != 1: #~user
+ userhome = os.path.join(os.path.dirname(userhome), path[1:i])
+
+ return userhome + path[i:]
+ else:
+ compat_expanduser = os.path.expanduser
+
+
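A small sanity check of the shim (assuming this patched youtube_dl is on the import path): for plain ASCII paths it should agree with the stdlib, while on Python 2 it additionally survives a non-ASCII $HOME by decoding with the filesystem encoding.

    import os.path
    from youtube_dl.utils import compat_expanduser

    print(compat_expanduser('~/.cache/youtube-dl'))
    print(os.path.expanduser('~/.cache/youtube-dl'))  # same result for ASCII homes
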
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
assert type(s) == type(u'')
print(s)
-# In Python 2.x, json.dump expects a bytestream.
-# In Python 3.x, it writes to a character stream
-if sys.version_info < (3,0):
- def write_json_file(obj, fn):
- with open(fn, 'wb') as f:
- json.dump(obj, f)
-else:
- def write_json_file(obj, fn):
- with open(fn, 'w', encoding='utf-8') as f:
- json.dump(obj, f)
-if sys.version_info >= (2,7):
+def write_json_file(obj, fn):
+ """ Encode obj as JSON and write it to fn, atomically """
+
+ args = {
+ 'suffix': '.tmp',
+ 'prefix': os.path.basename(fn) + '.',
+ 'dir': os.path.dirname(fn),
+ 'delete': False,
+ }
+
+ # In Python 2.x, json.dump expects a bytestream.
+ # In Python 3.x, it writes to a character stream
+ if sys.version_info < (3, 0):
+ args['mode'] = 'wb'
+ else:
+ args.update({
+ 'mode': 'w',
+ 'encoding': 'utf-8',
+ })
+
+ tf = tempfile.NamedTemporaryFile(**args)
+
+ try:
+ with tf:
+ json.dump(obj, tf)
+ os.rename(tf.name, fn)
+ except:
+ try:
+ os.remove(tf.name)
+ except OSError:
+ pass
+ raise
+
+
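Usage sketch: because the JSON is dumped to a temporary file in the target directory and then rename()d into place, readers should never observe a half-written file (rename is atomic on POSIX when source and target share a filesystem; on Windows it fails if the target already exists). The path below is made up:

    from youtube_dl.utils import write_json_file

    write_json_file({'id': 'abc123', 'title': 'example'}, '/tmp/example.info.json')
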
+if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z-]+$', key)
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val):
+ # Here comes the crazy part: In 2.6, if the xpath is a unicode,
+ # .//node does not match if a node is a direct child of . !
+ if isinstance(xpath, unicode):
+ xpath = xpath.encode('ascii')
+
for f in node.findall(xpath):
if f.attrib.get(key) == val:
return f
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
-def htmlentity_transform(matchobj):
- """Transforms an HTML entity to a character.
- This function receives a match object and is intended to be used with
- the re.sub() function.
- """
- entity = matchobj.group(1)
+def xpath_text(node, xpath, name=None, fatal=False):
+ if sys.version_info < (2, 7): # Crazy 2.6
+ xpath = xpath.encode('ascii')
- # Known non-numeric HTML entity
- if entity in compat_html_entities.name2codepoint:
- return compat_chr(compat_html_entities.name2codepoint[entity])
-
- mobj = re.match(u'(?u)#(x?\\d+)', entity)
- if mobj is not None:
- numstr = mobj.group(1)
- if numstr.startswith(u'x'):
- base = 16
- numstr = u'0%s' % numstr
+ n = node.find(xpath)
+ if n is None:
+ if fatal:
+ name = xpath if name is None else name
+ raise ExtractorError('Could not find XML element %s' % name)
else:
- base = 10
- return compat_chr(int(numstr, base))
+ return None
+ return n.text
- # Unknown entity in name, return its literal representation
- return (u'&%s;' % entity)
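A quick check of the new helper (assuming this patched youtube_dl is importable); with `fatal=False`, the default, a missing element yields None instead of an ExtractorError:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import xpath_text

    doc = ET.fromstring('<media><title>Example</title></media>')
    print(xpath_text(doc, './/title'))    # Example
    print(xpath_text(doc, './/missing'))  # None
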
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class BaseHTMLParser(compat_html_parser.HTMLParser):
return res
+def _htmlentity_transform(entity):
+ """Transforms an HTML entity to a character."""
+ # Known non-numeric HTML entity
+ if entity in compat_html_entities.name2codepoint:
+ return compat_chr(compat_html_entities.name2codepoint[entity])
+
+ mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
+ if mobj is not None:
+ numstr = mobj.group(1)
+ if numstr.startswith(u'x'):
+ base = 16
+ numstr = u'0%s' % numstr
+ else:
+ base = 10
+ return compat_chr(int(numstr, base))
+
+ # Unknown entity in name, return its literal representation
+ return (u'&%s;' % entity)
+
+
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
- result = re.sub(r'(?u)&(.+?);', htmlentity_transform, s)
- return result
+ return re.sub(
+ r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
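The rewrite scans entities with the cheaper `&([^;]+);` pattern and defers the decoding to `_htmlentity_transform`. A sample covering named, hex and decimal entities, with the output it should produce:

    from youtube_dl.utils import unescapeHTML

    print(unescapeHTML(u'Ben &amp; Jerry&#x27;s &#169; 2014'))
    # Ben & Jerry's © 2014
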
def encodeFilename(s, for_subprocess=False):
self.sock = sock
self._tunnel()
try:
- self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3)
+ self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
except ssl.SSLError:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23)
def https_open(self, req):
return self.do_open(HTTPSConnectionV3, req)
return HTTPSHandlerV3(**kwargs)
- else:
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
+ elif hasattr(ssl, 'create_default_context'): # Python >= 3.4
+ context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ context.options &= ~ssl.OP_NO_SSLv3 # Allow older, not-as-secure SSLv3
+ if opts_no_check_certificate:
+ context.verify_mode = ssl.CERT_NONE
+ return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
+ else: # Python < 3.4
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
+ if cause:
+ msg += u' (caused by %r)' % cause
if not expected:
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg)
return ret
def http_request(self, req):
- for h,v in std_headers.items():
- if h in req.headers:
- del req.headers[h]
- req.add_header(h, v)
+ for h, v in std_headers.items():
+ if h not in req.headers:
+ req.add_header(h, v)
if 'Youtubedl-no-compression' in req.headers:
if 'Accept-encoding' in req.headers:
del req.headers['Accept-encoding']
del req.headers['User-agent']
req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
del req.headers['Youtubedl-user-agent']
+
+ if sys.version_info < (2, 7) and '#' in req.get_full_url():
+ # Python 2.6 is brain-dead when it comes to fragments
+ req._Request__original = req._Request__original.partition('#')[0]
+ req._Request__r_type = req._Request__r_type.partition('#')[0]
+
return req
def http_response(self, req, resp):
return None
m = re.search(
- r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+ r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
- date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+ date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
'%b %dnd %Y %I:%M%p',
'%b %dth %Y %I:%M%p',
'%Y-%m-%d',
+ '%Y/%m/%d',
'%d.%m.%Y',
'%d/%m/%Y',
+ '%d/%m/%y',
'%Y/%m/%d %H:%M:%S',
+ '%d/%m/%Y %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
+ '%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
return bytes(xs)
-def get_cachedir(params={}):
- cache_root = os.environ.get('XDG_CACHE_HOME',
- os.path.expanduser('~/.cache'))
- return params.get('cachedir', os.path.join(cache_root, 'youtube-dl'))
-
-
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import fcntl
def _lock_file(f, exclusive):
- fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
+ fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
- fcntl.lockf(f, fcntl.LOCK_UN)
+ fcntl.flock(f, fcntl.LOCK_UN)
class locked_file(object):
return self.f.read(*args)
+def get_filesystem_encoding():
+ encoding = sys.getfilesystemencoding()
+ return encoding if encoding is not None else 'utf-8'
+
+
def shell_quote(args):
quoted_args = []
- encoding = sys.getfilesystemencoding()
- if encoding is None:
- encoding = 'utf-8'
+ encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
def get_term_width():
- columns = os.environ.get('COLUMNS', None)
+ columns = compat_getenv('COLUMNS', None)
if columns:
return int(columns)
return s
+def remove_end(s, end):
+ if s.endswith(end):
+ return s[:-len(end)]
+ return s
+
+
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip(u'/').split(u'/')[-1]
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
+ if v == '':
+ v = None
return default if v is None else (int(v) * invscale // scale)
+def str_or_none(v, default=None):
+ return default if v is None else compat_str(v)
+
+
def str_to_int(int_str):
+ """ A more relaxed version of int_or_none """
if int_str is None:
return None
- int_str = re.sub(r'[,\.]', u'', int_str)
+ int_str = re.sub(r'[,\.\+]', u'', int_str)
return int(int_str)
if s is None:
return None
+ s = s.strip()
+
m = re.match(
- r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
+ r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s)
if not m:
return None
res = int(m.group('secs'))
res += int(m.group('mins')) * 60
if m.group('hours'):
res += int(m.group('hours')) * 60 * 60
+ if m.group('ms'):
+ res += float(m.group('ms'))
return res
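With the broadened pattern, both colon-separated timestamps and spelled-out unit strings parse, and a fractional-seconds part now yields a float. A few expected values:

    from youtube_dl.utils import parse_duration

    print(parse_duration('1:02:30'))        # 3750
    print(parse_duration('2 min 5.5 sec'))  # 125.5
    print(parse_duration('90 s'))           # 90
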
class PagedList(object):
- def __init__(self, pagefunc, pagesize):
- self._pagefunc = pagefunc
- self._pagesize = pagesize
-
def __len__(self):
# This is only useful for tests
return len(self.getslice())
+
+class OnDemandPagedList(PagedList):
+ def __init__(self, pagefunc, pagesize):
+ self._pagefunc = pagefunc
+ self._pagesize = pagesize
+
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
return res
+class InAdvancePagedList(PagedList):
+ def __init__(self, pagefunc, pagecount, pagesize):
+ self._pagefunc = pagefunc
+ self._pagecount = pagecount
+ self._pagesize = pagesize
+
+ def getslice(self, start=0, end=None):
+ res = []
+ start_page = start // self._pagesize
+ end_page = (
+ self._pagecount if end is None else (end // self._pagesize + 1))
+ skip_elems = start - start_page * self._pagesize
+ only_more = None if end is None else end - start
+ for pagenum in range(start_page, end_page):
+ page = list(self._pagefunc(pagenum))
+ if skip_elems:
+ page = page[skip_elems:]
+ skip_elems = None
+ if only_more is not None:
+ if len(page) < only_more:
+ only_more -= len(page)
+ else:
+ page = page[:only_more]
+ res.extend(page)
+ break
+ res.extend(page)
+ return res
+
+
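A usage sketch contrasting the two subclasses (the page source is hypothetical): `OnDemandPagedList` keeps the old probe-as-you-go behaviour, while `InAdvancePagedList` is told the page count up front, as some APIs report it, and so never fetches past the requested slice.

    from youtube_dl.utils import OnDemandPagedList, InAdvancePagedList

    def fetch_page(pagenum):  # 25 items split into pages of 10
        return range(pagenum * 10, min((pagenum + 1) * 10, 25))

    print(OnDemandPagedList(fetch_page, 10).getslice(5, 12))      # [5, 6, 7, 8, 9, 10, 11]
    print(InAdvancePagedList(fetch_page, 3, 10).getslice(5, 12))  # [5, 6, 7, 8, 9, 10, 11]
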
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
lambda m: unicode_escape(m.group(0))[0],
s)
+
+def escape_rfc3986(s):
+ """Escape non-ASCII characters as suggested by RFC 3986"""
+ if sys.version_info < (3, 0) and isinstance(s, unicode):
+ s = s.encode('utf-8')
+ return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]")
+
+
+def escape_url(url):
+ """Escape URL as suggested by RFC 3986"""
+ url_parsed = compat_urllib_parse_urlparse(url)
+ return url_parsed._replace(
+ path=escape_rfc3986(url_parsed.path),
+ params=escape_rfc3986(url_parsed.params),
+ query=escape_rfc3986(url_parsed.query),
+ fragment=escape_rfc3986(url_parsed.fragment)
+ ).geturl()
+
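The new helpers should leave the netloc alone and percent-escape only path, params, query and fragment, keeping RFC 3986 reserved characters intact:

    from youtube_dl.utils import escape_url

    print(escape_url(u'http://example.com/p\u00e4th?q=v\u00e4l'))
    # http://example.com/p%C3%A4th?q=v%C3%A4l
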
try:
struct.pack(u'!I', 0)
except TypeError:
return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
+try:
+ etree_iter = xml.etree.ElementTree.Element.iter
+except AttributeError: # Python <=2.6
+ etree_iter = lambda n: n.findall('.//*')
+
+
def parse_xml(s):
class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
def doctype(self, name, pubid, system):
parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
- return xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
+ tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
+ # Fix up XML parser in Python 2.x
+ if sys.version_info < (3, 0):
+ for n in etree_iter(tree):
+ if n.text is not None:
+ if not isinstance(n.text, compat_str):
+ n.text = n.text.decode('utf-8')
+ return tree
if sys.version_info < (3, 0) and sys.platform == 'win32':
}
+def parse_age_limit(s):
+ if s is None:
+ return None
+ m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
+ return int(m.group('age')) if m else US_RATINGS.get(s, None)
+
+
def strip_jsonp(code):
return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
+def js_to_json(code):
+ def fix_kv(m):
+ v = m.group(0)
+ if v in ('true', 'false', 'null'):
+ return v
+ if v.startswith('"'):
+ return v
+ if v.startswith("'"):
+ v = v[1:-1]
+ v = re.sub(r"\\\\|\\'|\"", lambda m: {
+ '\\\\': '\\\\',
+ "\\'": "'",
+ '"': '\\"',
+ }[m.group(0)], v)
+ return '"%s"' % v
+
+ res = re.sub(r'''(?x)
+ "(?:[^"\\]*(?:\\\\|\\")?)*"|
+ '(?:[^'\\]*(?:\\\\|\\')?)*'|
+ [a-zA-Z_][a-zA-Z_0-9]*
+ ''', fix_kv, code)
+ res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
+ return res
+
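On typical JavaScript-ish object literals, `js_to_json` should turn bare keys and single-quoted strings into double-quoted ones, pass `true`/`false`/`null` through, and drop a trailing comma before `]`:

    from youtube_dl.utils import js_to_json

    print(js_to_json("{abc: true, 'def': 42}"))  # {"abc": true, "def": 42}
    print(js_to_json("['a', 'b',]"))             # ["a", "b"]
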
+
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
+
+
+def limit_length(s, length):
+ """ Add ellipses to overly long strings """
+ if s is None:
+ return None
+ ELLIPSES = '...'
+ if len(s) > length:
+ return s[:length - len(ELLIPSES)] + ELLIPSES
+ return s
+
+
+def version_tuple(v):
+ return tuple(int(e) for e in v.split('.'))
+
+
+def is_outdated_version(version, limit, assume_new=True):
+ if not version:
+ return not assume_new
+ try:
+ return version_tuple(version) < version_tuple(limit)
+ except ValueError:
+ return not assume_new
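This is what `FFmpegPostProcessor.check_version()` above builds on. Expected values; an unparseable version string, such as the 'present' marker from get_version, is assumed new, so exotic builds raise no warning:

    from youtube_dl.utils import is_outdated_version

    print(is_outdated_version('0.8.17', '1.0'))   # True: triggers the warning
    print(is_outdated_version('2.4.2', '1.0'))    # False
    print(is_outdated_version('present', '1.0'))  # False: assume_new on parse failure
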
-__version__ = '2014.08.05'
+__version__ = '2014.10.30'